/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}
/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * return 0 if succeeded, negative error code on failure
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(2);

	return 0;
}
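
/*
 * Minimal usage sketch (not part of this file): a driver embeds an
 * amd_sched_entity per context and binds it to one of the scheduler's run
 * queues.  "ring", "ctx", "prio" and "num_jobs" are hypothetical
 * driver-side names.
 *
 *	r = amd_sched_entity_init(&ring->sched, &ctx->entity,
 *				  &ring->sched.sched_rq[prio], num_jobs);
 *	if (r)
 *		return r;
 *	...
 *	amd_sched_entity_fini(&ring->sched, &ctx->entity);
 */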
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}
/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}
/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}
/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}
static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
}
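
/*
 * Handle the entity's current dependency fence: fences from the entity
 * itself are ignored, fences from the same scheduler only wait for the
 * scheduled fence, everything else gets a wakeup callback.  Returns true
 * if the entity has to keep waiting, false if the dependency is resolved.
 */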
static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourself */
		fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = fence_get(&s_fence->scheduled);
		fence_put(entity->dependency);
		entity->dependency = fence;
		if (!fence_add_callback(fence, &entity->cb,
					amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		fence_put(fence);
		return false;
	}

	if (!fence_add_callback(entity->dependency, &entity->cb,
				amd_sched_entity_wakeup))
		return true;

	fence_put(entity->dependency);
	return false;
}
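
/*
 * Peek at the job at the head of the entity's queue without removing it.
 * Returns NULL if the queue is empty or if a dependency callback had to be
 * installed (the entity has to keep waiting); otherwise all dependencies
 * reported by the backend were resolved and the job can be run.
 */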
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}
/**
 * Helper to submit a job to the job queue
 *
 * @sched_job		The pointer to job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}
/* job_finish is called after hw fence signaled, and
 * the job had already been deleted from ring_mirror_list
 */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	sched->ops->free_job(s_job);
}
static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}
static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}
static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}
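
/*
 * amd_sched_hw_job_reset() and amd_sched_job_recovery() below are meant to
 * bracket a GPU reset: the former detaches the HW fence callbacks of all
 * jobs on the mirror list, the latter resubmits those jobs and re-arms the
 * TDR timer.  How a driver sequences them around the actual ASIC reset is
 * up to the driver.
 */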
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;

	spin_lock(&sched->job_list_lock);
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
			fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
		}
	}
	atomic_set(&sched->hw_rq_count, 0);
	spin_unlock(&sched->job_list_lock);
}
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct fence *fence;

		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = fence_get(fence);
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}
/**
 * Submit a job to the job queue
 *
 * @sched_job		The pointer to job required to submit
 *
 * Waits until there is room in the entity's job queue and the job is pushed.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
			   amd_sched_job_finish_cb);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}
/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}
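
/*
 * Minimal submission sketch (not part of this file): a driver job that
 * embeds an amd_sched_job ("my_job", "ring", "ctx" and "owner" are
 * hypothetical driver-side names) is initialized against an entity and then
 * pushed; the scheduler thread later feeds it to the backend's run_job().
 *
 *	r = amd_sched_job_init(&my_job->base, &ring->sched, &ctx->entity, owner);
 *	if (r)
 *		return r;
 *	amd_sched_entity_push_job(&my_job->base);
 */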
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}
/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}
static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}
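
/*
 * Main scheduler thread: sleeps until a run queue has a ready entity and
 * there is room below the HW submission limit, pops the next job from that
 * entity, hands it to the backend's run_job() and wires the returned HW
 * fence up to amd_sched_process_job().
 */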
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			s_fence->parent = fence_get(fence);
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}
/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions to do.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}