/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_CS_MAX_PRIORITY		32u
#define AMDGPU_CS_NUM_BUCKETS		(AMDGPU_CS_MAX_PRIORITY + 1)
/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct amdgpu_cs_buckets {
	struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
};

static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}
static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]);
}
static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
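
/*
 * Illustrative sketch, not part of the original file: how the three bucket
 * helpers above are meant to be combined. The "entries"/"num_entries"
 * parameters are hypothetical stand-ins for the bo_list entries that the
 * real caller, amdgpu_cs_parser_relocs() below, iterates over.
 */
static void __maybe_unused
amdgpu_cs_buckets_usage_sketch(struct amdgpu_bo_list_entry *entries,
			       unsigned num_entries,
			       struct list_head *out_list)
{
	struct amdgpu_cs_buckets buckets;
	unsigned i;

	amdgpu_cs_buckets_init(&buckets);
	for (i = 0; i < num_entries; i++)
		amdgpu_cs_buckets_add(&buckets, &entries[i].tv.head,
				      entries[i].priority);
	/* Buckets are spliced at the list head one after another, so the
	 * highest-priority entries end up first in out_list. */
	amdgpu_cs_buckets_get_list(&buckets, out_list);
}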
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring)
{
	/* Right now all IPs have only one instance - multiple rings. */
	if (ip_instance != 0) {
		DRM_ERROR("invalid ip instance: %d\n", ip_instance);
		return -EINVAL;
	}

	switch (ip_type) {
	default:
		DRM_ERROR("unknown ip type: %d\n", ip_type);
		return -EINVAL;
	case AMDGPU_HW_IP_GFX:
		if (ring < adev->gfx.num_gfx_rings) {
			*out_ring = &adev->gfx.gfx_ring[ring];
		} else {
			DRM_ERROR("only %d gfx rings are supported now\n",
				  adev->gfx.num_gfx_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_COMPUTE:
		if (ring < adev->gfx.num_compute_rings) {
			*out_ring = &adev->gfx.compute_ring[ring];
		} else {
			DRM_ERROR("only %d compute rings are supported now\n",
				  adev->gfx.num_compute_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_DMA:
		if (ring < 2) {
			*out_ring = &adev->sdma[ring].ring;
		} else {
			DRM_ERROR("only two SDMA rings are supported\n");
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_UVD:
		*out_ring = &adev->uvd.ring;
		break;
	case AMDGPU_HW_IP_VCE:
		if (ring < 2) {
			*out_ring = &adev->vce.ring[ring];
		} else {
			DRM_ERROR("only two VCE rings are supported\n");
			return -EINVAL;
		}
		break;
	}
	return 0;
}
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
						 struct drm_file *filp,
						 struct amdgpu_ctx *ctx,
						 struct amdgpu_ib *ibs,
						 uint32_t num_ibs)
{
	struct amdgpu_cs_parser *parser;
	int i;

	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
	if (!parser)
		return NULL;

	parser->adev = adev;
	parser->filp = filp;
	parser->ctx = ctx;
	parser->ibs = ibs;
	parser->num_ibs = num_ibs;
	for (i = 0; i < num_ibs; i++)
		ibs[i].ctx = ctx;

	return parser;
}
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array = NULL;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list *bo_list = NULL;
	unsigned size, i;
	int r = 0;

	if (!cs->in.num_chunks)
		goto out;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		r = -EINVAL;
		goto out;
	}

	bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
	if (!amdgpu_enable_scheduler)
		p->bo_list = bo_list;
	else {
		if (bo_list && !bo_list->has_userptr) {
			p->bo_list = amdgpu_bo_list_clone(bo_list);
			amdgpu_bo_list_put(bo_list);
			if (!p->bo_list) {
				r = -ENOMEM;
				goto out;
			}
		} else if (bo_list && bo_list->has_userptr)
			p->bo_list = bo_list;
		else
			p->bo_list = NULL;
	}

	INIT_LIST_HEAD(&p->validated);
	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (chunk_array == NULL) {
		r = -ENOMEM;
		goto out;
	}

	chunk_array_user = (uint64_t __user *)(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		r = -EFAULT;
		goto out;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (p->chunks == NULL) {
		r = -ENOMEM;
		goto out;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)chunk_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			r = -EFAULT;
			goto out;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = (void __user *)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		if (p->chunks[i].kdata == NULL) {
			r = -ENOMEM;
			goto out;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			r = -EFAULT;
			goto out;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++p->num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
				uint32_t handle;
				struct drm_gem_object *gobj;
				struct drm_amdgpu_cs_chunk_fence *fence_data;

				fence_data = (void *)p->chunks[i].kdata;
				handle = fence_data->handle;
				gobj = drm_gem_object_lookup(p->adev->ddev,
							     p->filp, handle);
				if (gobj == NULL) {
					r = -EINVAL;
					goto out;
				}

				p->uf.bo = gem_to_amdgpu_bo(gobj);
				p->uf.offset = fence_data->offset;
			} else {
				r = -EINVAL;
				goto out;
			}
			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
			break;

		default:
			r = -EINVAL;
			goto out;
		}
	}

	p->ibs = kmalloc_array(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!p->ibs)
		r = -ENOMEM;

out:
	kfree(chunk_array);
	return r;
}
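
/*
 * Not part of the original file: a rough userspace-side sketch of the chunk
 * layout amdgpu_cs_parser_init() expects, based on the drm_amdgpu_cs and
 * drm_amdgpu_cs_chunk structures from amdgpu_drm.h. Variable names are
 * hypothetical.
 *
 *	struct drm_amdgpu_cs_chunk chunks[2];	// e.g. one IB chunk + one fence chunk
 *	uint64_t chunk_ptrs[2];			// cs.in.chunks points at these
 *	union drm_amdgpu_cs cs = {};
 *
 *	chunks[0].chunk_id   = AMDGPU_CHUNK_ID_IB;
 *	chunks[0].length_dw  = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
 *	chunks[0].chunk_data = (uint64_t)(uintptr_t)&ib_info;
 *	...
 *	chunk_ptrs[0] = (uint64_t)(uintptr_t)&chunks[0];
 *	chunk_ptrs[1] = (uint64_t)(uintptr_t)&chunks[1];
 *
 *	cs.in.ctx_id         = ctx_id;
 *	cs.in.bo_list_handle = bo_list_handle;
 *	cs.in.num_chunks     = 2;
 *	cs.in.chunks         = (uint64_t)(uintptr_t)chunk_ptrs;
 *	drmCommandWriteRead(fd, DRM_AMDGPU_CS, &cs, sizeof(cs));
 */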
/* Returns how many bytes TTM can move per IB.
 */
static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
{
	u64 real_vram_size = adev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&adev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}
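
/*
 * Worked example (illustration, not in the original file): for a card with
 * 4 GiB of VRAM and 1 GiB currently in use,
 *
 *	half_vram             = 2 GiB
 *	half_free_vram        = 2 GiB - 1 GiB = 1 GiB
 *	bytes_moved_threshold = 512 MiB
 *
 * Once usage reaches or exceeds 2 GiB, half_free_vram becomes 0 and the
 * threshold clamps to the 1 MiB floor enforced by max() above.
 */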
int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_bo_list_entry *lobj;
	struct list_head duplicates;
	struct amdgpu_bo *bo;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
	int r;

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, &p->validated, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 current_domain =
				amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			amdgpu_ttm_placement_from_domain(bo, domain);
			initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&adev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(&p->ticket, &p->validated);
				return r;
			}
		}
		lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
	}
	return 0;
}
static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_cs_buckets buckets;
	bool need_mmap_lock = false;
	int i, r;

	if (p->bo_list) {
		need_mmap_lock = p->bo_list->has_userptr;
		amdgpu_cs_buckets_init(&buckets);
		for (i = 0; i < p->bo_list->num_entries; i++)
			amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head,
					      p->bo_list->array[i].priority);

		amdgpu_cs_buckets_get_list(&buckets, &p->validated);
	}

	p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
				      &p->validated);

	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = amdgpu_cs_list_validate(p);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp);

		if (r)
			return r;
	}
	return 0;
}
static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head);
	struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ibs[parser->num_ibs-1].fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}
}
static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
{
	unsigned i;

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list) {
		if (amdgpu_enable_scheduler && !parser->bo_list->has_userptr)
			amdgpu_bo_list_free(parser->bo_list);
		else
			amdgpu_bo_list_put(parser->bo_list);
	}
	drm_free_large(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (!amdgpu_enable_scheduler) {
		if (parser->ibs)
			for (i = 0; i < parser->num_ibs; i++)
				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
		kfree(parser->ibs);
		if (parser->uf.bo)
			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
	}

	kfree(parser);
}
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffers, otherwise just free the memory
 * used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
	amdgpu_cs_parser_fini_early(parser, error, backoff);
	amdgpu_cs_parser_fini_late(parser);
}
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
				   struct amdgpu_vm *vm)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_update_page_directory(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->ibs[0].sync, vm->page_directory_fence);
	if (r)
		return r;

	r = amdgpu_vm_clear_freed(adev, vm);
	if (r)
		return r;

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct fence *f;

			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
			if (r)
				return r;

			f = bo_va->last_pt_update;
			r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
			if (r)
				return r;
		}
	}

	return amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
}
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring;
	int i, r;

	if (parser->num_ibs == 0)
		return 0;

	/* Only for UVD/VCE VM emulation */
	for (i = 0; i < parser->num_ibs; i++) {
		ring = parser->ibs[i].ring;
		if (ring->funcs->parse_cs) {
			r = amdgpu_ring_parse_cs(ring, parser, i);
			if (r)
				return r;
		}
	}

	mutex_lock(&vm->mutex);
	r = amdgpu_bo_vm_update_pte(parser, vm);
	if (r)
		goto out;

	amdgpu_cs_sync_rings(parser);
	if (!amdgpu_enable_scheduler)
		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
				       parser->filp);

out:
	mutex_unlock(&vm->mutex);
	return r;
}
static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j, r;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
				       chunk_ib->ip_instance, chunk_ib->ring,
				       &ring);
		if (r)
			return r;

		if (ring->funcs->parse_cs) {
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			uint64_t offset;
			uint8_t *kptr;

			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
						   &aobj);
			if (!aobj) {
				DRM_ERROR("IB va_start is invalid\n");
				return -EINVAL;
			}

			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
			    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
			kptr += chunk_ib->va_start - offset;

			r = amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);
		} else {
			r = amdgpu_ib_get(ring, vm, 0, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			ib->gpu_addr = chunk_ib->va_start;
		}

		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;
		ib->ctx = parser->ctx;
		j++;
	}

	if (!parser->num_ibs)
		return 0;

	/* add GDS resources to first IB */
	if (parser->bo_list) {
		struct amdgpu_bo *gds = parser->bo_list->gds_obj;
		struct amdgpu_bo *gws = parser->bo_list->gws_obj;
		struct amdgpu_bo *oa = parser->bo_list->oa_obj;
		struct amdgpu_ib *ib = &parser->ibs[0];

		if (gds) {
			ib->gds_base = amdgpu_bo_gpu_offset(gds);
			ib->gds_size = amdgpu_bo_size(gds);
		}
		if (gws) {
			ib->gws_base = amdgpu_bo_gpu_offset(gws);
			ib->gws_size = amdgpu_bo_size(gws);
		}
		if (oa) {
			ib->oa_base = amdgpu_bo_gpu_offset(oa);
			ib->oa_size = amdgpu_bo_size(oa);
		}
	}

	/* wrap the last IB with user fence */
	if (parser->uf.bo) {
		struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];

		/* UVD & VCE fw doesn't support user fences */
		if (ib->ring->type == AMDGPU_RING_TYPE_UVD ||
		    ib->ring->type == AMDGPU_RING_TYPE_VCE)
			return -EINVAL;

		ib->user = &parser->uf;
	}

	return 0;
}
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_ib *ib;
	int i, j, r;

	if (!p->num_ibs)
		return 0;

	/* Add dependencies to first IB */
	ib = &p->ibs[0];
	for (i = 0; i < p->nchunks; ++i) {
		struct drm_amdgpu_cs_chunk_dep *deps;
		struct amdgpu_cs_chunk *chunk;
		unsigned num_deps;

		chunk = &p->chunks[i];

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
			continue;

		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
		num_deps = chunk->length_dw * 4 /
			sizeof(struct drm_amdgpu_cs_chunk_dep);

		for (j = 0; j < num_deps; ++j) {
			struct amdgpu_ring *ring;
			struct amdgpu_ctx *ctx;
			struct fence *fence;

			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
					       deps[j].ip_instance,
					       deps[j].ring, &ring);
			if (r)
				return r;

			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
			if (ctx == NULL)
				return -EINVAL;

			fence = amdgpu_ctx_get_fence(ctx, ring,
						     deps[j].handle);
			if (IS_ERR(fence)) {
				r = PTR_ERR(fence);
				amdgpu_ctx_put(ctx);
				return r;

			} else if (fence) {
				r = amdgpu_sync_fence(adev, &ib->sync, fence);
				fence_put(fence);
				amdgpu_ctx_put(ctx);
				if (r)
					return r;
			}
		}
	}

	return 0;
}
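
/*
 * Not part of the original file: a sketch of what a dependencies chunk looks
 * like from userspace, based on struct drm_amdgpu_cs_chunk_dep from
 * amdgpu_drm.h. Each entry names an earlier submission (by context id, ring
 * and sequence handle) that this submission must wait for. Values shown are
 * hypothetical.
 *
 *	struct drm_amdgpu_cs_chunk_dep dep = {
 *		.ip_type     = AMDGPU_HW_IP_GFX,
 *		.ip_instance = 0,
 *		.ring        = 0,
 *		.ctx_id      = other_ctx_id,
 *		.handle      = other_seq_no,	// sequence returned in cs.out.handle
 *	};
 *	chunk.chunk_id   = AMDGPU_CHUNK_ID_DEPENDENCIES;
 *	chunk.length_dw  = sizeof(dep) / 4;
 *	chunk.chunk_data = (uint64_t)(uintptr_t)&dep;
 */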
static int amdgpu_cs_parser_prepare_job(struct amdgpu_cs_parser *sched_job)
{
	int r, i;
	struct amdgpu_cs_parser *parser = sched_job;
	struct amdgpu_device *adev = sched_job->adev;
	bool reserved_buffers = false;

	r = amdgpu_cs_parser_relocs(parser);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (r == -ENOMEM)
				DRM_ERROR("Not enough memory for command submission!\n");
			else
				DRM_ERROR("Failed to process the buffer list %d!\n", r);
		}
	}

	if (!r) {
		reserved_buffers = true;
		r = amdgpu_cs_ib_fill(adev, parser);
	}
	if (!r) {
		r = amdgpu_cs_dependencies(adev, parser);
		if (r)
			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
	}
	if (r) {
		amdgpu_cs_parser_fini(parser, r, reserved_buffers);
		return r;
	}

	for (i = 0; i < parser->num_ibs; i++)
		trace_amdgpu_cs(parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, parser);
	return r;
}
static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
	struct amdgpu_device *adev,
	struct amdgpu_cs_parser *parser)
{
	int i, r;

	struct amdgpu_cs_chunk *chunk;
	struct drm_amdgpu_cs_chunk_ib *chunk_ib;
	struct amdgpu_ring *ring = NULL;

	for (i = 0; i < parser->nchunks; i++) {
		chunk = &parser->chunks[i];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
				       chunk_ib->ip_instance, chunk_ib->ring,
				       &ring);
		if (r)
			return NULL;
		break;
	}
	return ring;
}
static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
{
	int i;

	if (sched_job->ctx)
		amdgpu_ctx_put(sched_job->ctx);
	if (sched_job->ibs)
		for (i = 0; i < sched_job->num_ibs; i++)
			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
	kfree(sched_job->ibs);
	if (sched_job->uf.bo)
		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
	return 0;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser *parser;
	int r;

	down_read(&adev->exclusive_lock);
	if (!adev->accel_working) {
		up_read(&adev->exclusive_lock);
		return -EBUSY;
	}

	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
	if (!parser) {
		up_read(&adev->exclusive_lock);
		return -ENOMEM;
	}

	r = amdgpu_cs_parser_init(parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		amdgpu_cs_parser_fini(parser, r, false);
		up_read(&adev->exclusive_lock);
		r = amdgpu_cs_handle_lockup(adev, r);
		return r;
	}

	r = amdgpu_cs_parser_prepare_job(parser);
	if (r)
		goto out;

	if (amdgpu_enable_scheduler && parser->num_ibs) {
		struct amdgpu_job *job;
		struct amdgpu_ring *ring =
			amdgpu_cs_parser_get_ring(adev, parser);

		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
		if (!job) {
			r = -ENOMEM;
			goto out;
		}

		job->base.sched = ring->scheduler;
		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
		job->adev = parser->adev;
		job->ibs = parser->ibs;
		job->num_ibs = parser->num_ibs;
		job->owner = parser->filp;
		job->ctx = amdgpu_ctx_get_ref(parser->ctx);
		mutex_init(&job->job_lock);
		if (job->ibs[job->num_ibs - 1].user) {
			memcpy(&job->uf, &parser->uf,
			       sizeof(struct amdgpu_user_fence));
			job->ibs[job->num_ibs - 1].user = &job->uf;
		}

		job->free_job = amdgpu_cs_free_job;
		mutex_lock(&job->job_lock);
		r = amd_sched_push_job((struct amd_sched_job *)job);
		if (r) {
			mutex_unlock(&job->job_lock);
			amdgpu_cs_free_job(job);
			kfree(job);
			goto out;
		}
		job->ibs[parser->num_ibs - 1].sequence =
			amdgpu_ctx_add_fence(job->ctx, ring,
					     &job->base.s_fence->base,
					     job->base.s_fence->v_seq);
		cs->out.handle = job->base.s_fence->v_seq;
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &job->base.s_fence->base);

		mutex_unlock(&job->job_lock);
		amdgpu_cs_parser_fini_late(parser);
		up_read(&adev->exclusive_lock);
		return 0;
	}

	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
out:
	amdgpu_cs_parser_fini(parser, r, true);
	up_read(&adev->exclusive_lock);
	r = amdgpu_cs_handle_lockup(adev, r);
	return r;
}
/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct fence *fence;
	long r;

	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
			       wait->in.ring, &ring);
	if (r)
		return r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = fence_wait_timeout(fence, true, timeout);
		fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}
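
/*
 * Not part of the original file: a rough userspace-side sketch of waiting on
 * a submission, using the drm_amdgpu_wait_cs union from amdgpu_drm.h. The
 * handle is the sequence number returned in cs.out.handle by the CS ioctl;
 * out.status reads back non-zero if the wait timed out. Values shown are
 * hypothetical.
 *
 *	union drm_amdgpu_wait_cs wait = {};
 *
 *	wait.in.handle      = seq_no;		// from cs.out.handle
 *	wait.in.ctx_id      = ctx_id;
 *	wait.in.ip_type     = AMDGPU_HW_IP_GFX;
 *	wait.in.ip_instance = 0;
 *	wait.in.ring        = 0;
 *	wait.in.timeout     = timeout;		// interpreted by amdgpu_gem_timeout()
 *	drmCommandWriteRead(fd, DRM_AMDGPU_WAIT_CS, &wait, sizeof(wait));
 *	busy = wait.out.status;
 */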
/**
 * amdgpu_cs_find_mapping - find bo_va mapping for a VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo)
{
	struct amdgpu_bo_list_entry *reloc;
	struct amdgpu_bo_va_mapping *mapping;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(reloc, &parser->validated, tv.head) {
		if (!reloc->bo_va)
			continue;

		list_for_each_entry(mapping, &reloc->bo_va->valids, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = reloc->bo_va->bo;
			return mapping;
		}

		list_for_each_entry(mapping, &reloc->bo_va->invalids, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = reloc->bo_va->bo;
			return mapping;
		}
	}

	return NULL;
}