/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
/**
 * Select next job from a specified run queue with round robin policy.
 * Return NULL if nothing available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;
	struct amd_sched_job *sched_job;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			sched_job = amd_sched_entity_pop_job(entity);
			if (sched_job) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return sched_job;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		sched_job = amd_sched_entity_pop_job(entity);
		if (sched_job) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return sched_job;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}
/**
 * Init a context entity used by scheduler when submit to HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * return 0 on success, negative error code on failure
*/
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);

	return 0;
}
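/*
 * Usage sketch (illustrative only, not part of the scheduler itself): a
 * driver typically embeds one amd_sched_entity per context and ring and
 * initializes it against that ring's scheduler.  The ctx/ring/ENTITY_JOBS
 * names below are hypothetical placeholders.
 *
 *	struct amd_sched_rq *rq = &ring->sched.sched_rq;
 *	int r;
 *
 *	r = amd_sched_entity_init(&ring->sched, &ctx->entity, rq,
 *				  ENTITY_JOBS);
 *	if (r)
 *		return r;
 */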
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
*/
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}
/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}
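/*
 * Teardown sketch (illustrative): the caller passes the same scheduler the
 * entity was initialized against; the call blocks until all jobs already
 * queued on the entity have been scheduled.  ctx/ring are hypothetical.
 *
 *	amd_sched_entity_fini(&ring->sched, &ctx->entity);
 */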
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job))) {

		if (entity->dependency->context == entity->fence_context) {
			/* We can ignore fences from ourself */
			fence_put(entity->dependency);
			continue;
		}

		/* If the callback can't be installed the fence is already
		 * signaled, so drop it and check the next dependency. */
		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			fence_put(entity->dependency);
		else
			return NULL;
	}

	return sched_job;
}
/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The pointer to job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(sched_job->sched);

	return added;
}
/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to job required to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(
		entity, sched_job->owner);

	if (!fence)
		return -ENOMEM;

	fence_get(&fence->base);
	sched_job->s_fence = fence;

	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
	trace_amd_sched_job(sched_job);
	return 0;
}
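/*
 * Submission sketch (illustrative): the caller allocates an amd_sched_job,
 * points it at the target scheduler and entity, then pushes it; the call
 * blocks until the job fits into the entity's job_queue kfifo.  Only the
 * fields this file reads (sched, s_entity, owner) are shown; ring/ctx/filp
 * are hypothetical.
 *
 *	sched_job->sched = &ring->sched;
 *	sched_job->s_entity = &ctx->entity;
 *	sched_job->owner = filp;
 *	r = amd_sched_entity_push_job(sched_job);
 *	if (r)
 *		return r;
 */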
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * Select next job to run
*/
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *sched_job;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
	if (sched_job == NULL)
		sched_job = amd_sched_rq_select_job(&sched->sched_rq);

	return sched_job;
}
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	unsigned long flags;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		cancel_delayed_work(&s_fence->dwork);
		spin_lock_irqsave(&sched->fence_list_lock, flags);
		list_del_init(&s_fence->list);
		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
	}

	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}
static void amd_sched_fence_work_func(struct work_struct *work)
{
	struct amd_sched_fence *s_fence =
		container_of(work, struct amd_sched_fence, dwork.work);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	struct amd_sched_fence *entity, *tmp;
	unsigned long flags;

	DRM_ERROR("[%s] scheduler is timeout!\n", sched->name);

	/* Clean all pending fences */
	spin_lock_irqsave(&sched->fence_list_lock, flags);
	list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
		DRM_ERROR("  fence no %d\n", entity->base.seqno);
		cancel_delayed_work(&entity->dwork);
		list_del_init(&entity->list);
		fence_put(&entity->base);
	}
	spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	spin_lock_init(&sched->fence_list_lock);
	INIT_LIST_HEAD(&sched->fence_list);

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;
		unsigned long flags;

		wait_event_interruptible(sched->wake_up_worker,
			kthread_should_stop() ||
			(sched_job = amd_sched_select_job(sched)));

		if (!sched_job)
			continue;

		entity = sched_job->s_entity;
		s_fence = sched_job->s_fence;

		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
			INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
			schedule_delayed_work(&s_fence->dwork, sched->timeout);
			spin_lock_irqsave(&sched->fence_list_lock, flags);
			list_add_tail(&s_fence->list, &sched->fence_list);
			spin_unlock_irqrestore(&sched->fence_list_lock, flags);
		}

		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}
/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	Number of hw submissions to do.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to disable it.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
*/
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}
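/*
 * Initialization sketch (illustrative): a backend supplies the callbacks
 * this file invokes (->dependency() and ->run_job()) and creates one
 * scheduler per hardware ring; amd_sched_fini() tears it down again.  The
 * my_*, ring, num_hw_submission and timeout_ms names are hypothetical.
 *
 *	static struct amd_sched_backend_ops my_sched_ops = {
 *		.dependency = my_job_dependency,
 *		.run_job = my_run_job,
 *	};
 *
 *	r = amd_sched_init(&ring->sched, &my_sched_ops, num_hw_submission,
 *			   msecs_to_jiffies(timeout_ms), ring->name);
 *	if (r)
 *		return r;
 *	...
 *	amd_sched_fini(&ring->sched);
 */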
/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}