/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"
/* Initialize a given run queue struct */
static void init_rq(struct amd_run_queue *rq)
{
        INIT_LIST_HEAD(&rq->head.list);
        rq->head.belongto_rq = rq;
        mutex_init(&rq->lock);
        atomic_set(&rq->nr_entity, 0);
        rq->current_entity = &rq->head;
}
/* Note: the caller must hold the run queue lock or be in an atomic context */
static void rq_remove_entity(struct amd_run_queue *rq,
                             struct amd_sched_entity *entity)
{
        if (rq->current_entity == entity)
                rq->current_entity = list_entry(entity->list.prev,
                                                typeof(*entity), list);
        list_del_init(&entity->list);
        atomic_dec(&rq->nr_entity);
}
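/* Note: the caller must hold the run queue lock or be in an atomic context */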
static void rq_add_entity(struct amd_run_queue *rq,
                          struct amd_sched_entity *entity)
{
        list_add_tail(&entity->list, &rq->head.list);
        atomic_inc(&rq->nr_entity);
}
/**
 * Select the next entity from a given run queue with round robin policy.
 * It may return the same entity as the current one if that is the only
 * available entity in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *rq_select_entity(struct amd_run_queue *rq)
{
        struct amd_sched_entity *p = rq->current_entity;
        int i = atomic_read(&rq->nr_entity) + 1; /* real count + dummy head */

        while (i) {
                p = list_entry(p->list.next, typeof(*p), list);
                if (!rq->check_entity_status(p)) {
                        rq->current_entity = p;
                        break;
                }
                i--;
        }
        return i ? p : NULL;
}
static bool context_entity_is_waiting(struct amd_sched_entity *entity)
{
        /* TODO: sync obj for multi-ring synchronization */
        return false;
}
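/*
 * Check whether an entity can be picked by the run queue: the dummy head,
 * entities with an empty job queue and entities still waiting on
 * synchronization are skipped. Returns 0 when the entity is ready.
 */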
static int gpu_entity_check_status(struct amd_sched_entity *entity)
{
        if (entity == &entity->belongto_rq->head)
                return -1;

        if (kfifo_is_empty(&entity->job_queue) ||
            context_entity_is_waiting(entity))
                return -1;

        return 0;
}
/**
 * Note: This function should only be called inside the scheduler main
 * function for thread safety; there is no other protection here.
 * Returns true if the scheduler has room to submit more jobs.
 *
 * For active_hw_rq there is only one producer (the scheduler thread) and
 * one consumer (the ISR). It should be safe to use this function in the
 * scheduler main thread to decide whether to continue emitting more IBs.
 */
static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
{
        unsigned long flags;
        bool full;

        spin_lock_irqsave(&sched->queue_lock, flags);
        full = atomic64_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
        spin_unlock_irqrestore(&sched->queue_lock, flags);
        return full;
}
/**
 * Select the next entity from the kernel run queue; if none is available,
 * return NULL.
 */
static struct amd_sched_entity *
kernel_rq_select_context(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *sched_entity;
        struct amd_run_queue *rq = &sched->kernel_rq;

        mutex_lock(&rq->lock);
        sched_entity = rq_select_entity(rq);
        mutex_unlock(&rq->lock);
        return sched_entity;
}
/**
 * Select the next entity containing real IB submissions.
 */
static struct amd_sched_entity *
select_context(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *wake_entity = NULL;
        struct amd_sched_entity *tmp;
        struct amd_run_queue *rq;

        if (!is_scheduler_ready(sched))
                return NULL;

        /* The kernel run queue has higher priority than the normal run queue */
        tmp = kernel_rq_select_context(sched);
        if (tmp != NULL)
                goto exit;

        rq = &sched->sched_rq;
        mutex_lock(&rq->lock);
        tmp = rq_select_entity(rq);
        mutex_unlock(&rq->lock);

exit:
        if (sched->current_entity && (sched->current_entity != tmp))
                wake_entity = sched->current_entity;
        sched->current_entity = tmp;
        if (wake_entity)
                wake_up(&wake_entity->wait_queue);
        return tmp;
}
/**
 * Init a context entity used by the scheduler when submitting to the HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @kernel	If this is an entity for the kernel
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_run_queue *rq,
                          uint32_t jobs)
{
        uint64_t seq_ring = 0;
        char name[20];

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        seq_ring = ((uint64_t)sched->ring_id) << 60;
        spin_lock_init(&entity->lock);
        entity->belongto_rq = rq;
        entity->scheduler = sched;
        init_waitqueue_head(&entity->wait_queue);
        init_waitqueue_head(&entity->wait_emit);
        entity->fence_context = fence_context_alloc(1);
        snprintf(name, sizeof(name), "c_entity[%llu]", entity->fence_context);
        memcpy(entity->name, name, 20);
        if (kfifo_alloc(&entity->job_queue,
                        jobs * sizeof(void *),
                        GFP_KERNEL))
                return -ENOMEM;

        spin_lock_init(&entity->queue_lock);
        atomic64_set(&entity->last_queued_v_seq, seq_ring);
        atomic64_set(&entity->last_signaled_v_seq, seq_ring);

        /* Add the entity to the run queue */
        mutex_lock(&rq->lock);
        rq_add_entity(rq, entity);
        mutex_unlock(&rq->lock);
        return 0;
}
/**
 * Query if an entity is initialized.
 *
 * @sched	Pointer to the scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity is initialized, false otherwise.
 */
static bool is_context_entity_initialized(struct amd_gpu_scheduler *sched,
                                          struct amd_sched_entity *entity)
{
        return entity->scheduler == sched &&
                entity->belongto_rq != NULL;
}
static bool is_context_entity_idle(struct amd_gpu_scheduler *sched,
                                   struct amd_sched_entity *entity)
{
        /*
         * Idle means no pending IBs, and the entity is not
         * currently being used.
         */
        if ((sched->current_entity != entity) &&
            kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}
/**
 * Destroy a context entity.
 *
 * @sched	Pointer to the scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity)
{
        int r = 0;
        struct amd_run_queue *rq = entity->belongto_rq;

        if (!is_context_entity_initialized(sched, entity))
                return 0;

        /*
         * The client will not queue more IBs during this fini; consume the
         * existing queued IBs.
         */
        r = wait_event_timeout(entity->wait_queue,
                               is_context_entity_idle(sched, entity),
                               msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS)
                               ) ? 0 : -1;

        if (r && entity->is_pending)
                DRM_INFO("Entity %p is in waiting state during fini, "
                         "all pending ibs will be canceled.\n",
                         entity);

        mutex_lock(&rq->lock);
        rq_remove_entity(rq, entity);
        mutex_unlock(&rq->lock);
        kfifo_free(&entity->job_queue);
        return r;
}
/**
 * Submit a normal job to the job queue.
 *
 * @sched	The pointer to the scheduler
 * @c_entity	The pointer to the amd_sched_entity
 * @data	The job data required to submit
 * @fence	Returns the scheduler fence created for this job
 *
 * Return 0 on success, negative error code on failure. If the entity's job
 * queue is full, this call blocks until the scheduler consumes some queued
 * commands and a slot becomes free.
 */
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
                       struct amd_sched_entity *c_entity,
                       void *data,
                       struct amd_sched_fence **fence)
{
        struct amd_sched_job *job;

        job = kzalloc(sizeof(struct amd_sched_job), GFP_KERNEL);
        if (!job)
                return -ENOMEM;
        job->sched = sched;
        job->s_entity = c_entity;
        job->data = data;
        *fence = amd_sched_fence_create(c_entity);
        if ((*fence) == NULL) {
                kfree(job);
                return -EINVAL;
        }
        job->s_fence = *fence;
        while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
                                   &c_entity->queue_lock) != sizeof(void *)) {
                /*
                 * The current context used up all its IB slots;
                 * wait here, or check whether the GPU is hung.
                 */
                schedule();
        }

        wake_up_interruptible(&sched->wait_queue);
        return 0;
}
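/*
 * Fence callback for a submitted job: record the signaled sequence on the
 * entity, signal the scheduler fence, drop the job from active_hw_rq and
 * wake the scheduler thread so it can submit more IBs.
 */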
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_job *sched_job =
                container_of(cb, struct amd_sched_job, cb);
        struct amd_gpu_scheduler *sched;
        unsigned long flags;

        sched = sched_job->sched;
        atomic64_set(&sched_job->s_entity->last_signaled_v_seq,
                     sched_job->s_fence->v_seq);
        amd_sched_fence_signal(sched_job->s_fence);
        spin_lock_irqsave(&sched->queue_lock, flags);
        list_del(&sched_job->list);
        atomic64_dec(&sched->hw_rq_count);
        spin_unlock_irqrestore(&sched->queue_lock, flags);

        sched->ops->process_job(sched, sched_job);
        fence_put(&sched_job->s_fence->base);
        kfree(sched_job);
        wake_up_interruptible(&sched->wait_queue);
}
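/*
 * The scheduler main loop: wait until the hardware ring has room and an
 * entity with queued jobs is available, pop one job, let the backend prepare
 * and run it, then attach amd_sched_process_job to the returned hardware
 * fence.
 */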
static int amd_sched_main(void *param)
{
        int r;
        struct amd_sched_job *job;
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_sched_entity *c_entity = NULL;
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct fence *fence;
                unsigned long flags;

                wait_event_interruptible(sched->wait_queue,
                                         is_scheduler_ready(sched) &&
                                         (c_entity = select_context(sched)));
                r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
                if (r != sizeof(void *))
                        continue;
                r = sched->ops->prepare_job(sched, c_entity, job);
                if (!r) {
                        spin_lock_irqsave(&sched->queue_lock, flags);
                        list_add_tail(&job->list, &sched->active_hw_rq);
                        atomic64_inc(&sched->hw_rq_count);
                        spin_unlock_irqrestore(&sched->queue_lock, flags);
                }
                mutex_lock(&sched->sched_lock);
                fence = sched->ops->run_job(sched, c_entity, job);
                if (fence) {
                        r = fence_add_callback(fence, &job->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &job->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n", r);
                        fence_put(fence);
                }
                mutex_unlock(&sched->sched_lock);
        }
        return 0;
}
/**
 * Create a gpu scheduler.
 *
 * @device		The device context for this scheduler
 * @ops			The backend operations for this scheduler
 * @ring		The scheduler is per ring; this is the ring id
 * @granularity		The minimum time slice, in ms, that the scheduler will schedule
 * @preemption		Indicates whether this ring supports preemption; 0 means no
 * @hw_submission	The maximum number of jobs in flight on the hardware ring
 *
 * Return the pointer to the scheduler on success, otherwise return NULL.
 */
struct amd_gpu_scheduler *amd_sched_create(void *device,
                                           struct amd_sched_backend_ops *ops,
                                           unsigned ring,
                                           unsigned granularity,
                                           unsigned preemption,
                                           unsigned hw_submission)
{
        struct amd_gpu_scheduler *sched;
        char name[20];

        sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
        if (!sched)
                return NULL;

        sched->device = device;
        sched->ops = ops;
        sched->granularity = granularity;
        sched->ring_id = ring;
        sched->preemption = preemption;
        sched->hw_submission_limit = hw_submission;
        snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
        mutex_init(&sched->sched_lock);
        spin_lock_init(&sched->queue_lock);
        init_rq(&sched->sched_rq);
        sched->sched_rq.check_entity_status = gpu_entity_check_status;

        init_rq(&sched->kernel_rq);
        sched->kernel_rq.check_entity_status = gpu_entity_check_status;

        init_waitqueue_head(&sched->wait_queue);
        INIT_LIST_HEAD(&sched->active_hw_rq);
        atomic64_set(&sched->hw_rq_count, 0);
        /* Each scheduler runs on a separate kernel thread */
        sched->thread = kthread_create(amd_sched_main, sched, name);
        if (!IS_ERR(sched->thread)) {
                wake_up_process(sched->thread);
                return sched;
        }

        DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
        kfree(sched);
        return NULL;
}
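/*
 * Example usage (illustrative sketch only): how a driver might wire one ring
 * to this scheduler. Everything prefixed with "my_", plus adev, ctx_entity,
 * job_data and fence, are hypothetical driver-side names and not part of
 * this file; only the amd_sched_* entry points, the sched_rq member and the
 * backend op fields are defined by this scheduler.
 *
 *	static struct amd_sched_backend_ops my_ops = {
 *		.prepare_job = my_prepare_job,
 *		.run_job     = my_run_job,
 *		.process_job = my_process_job,
 *	};
 *
 *	sched = amd_sched_create(adev, &my_ops, my_ring_id, 1, 0, 16);
 *	if (!sched)
 *		return -ENOMEM;
 *
 *	r = amd_sched_entity_init(sched, &ctx_entity, &sched->sched_rq, 32);
 *	if (r)
 *		return r;
 *
 *	r = amd_sched_push_job(sched, &ctx_entity, job_data, &fence);
 */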
/**
 * Destroy a gpu scheduler.
 *
 * @sched	The pointer to the scheduler
 *
 * Return 0 on success, -1 on failure.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
        kthread_stop(sched->thread);
        kfree(sched);
        return 0;
}
/**
 * Get the next queued sequence number.
 *
 * @c_entity	The context entity
 *
 * Return the next queued sequence number.
 */
uint64_t amd_sched_next_queued_seq(struct amd_sched_entity *c_entity)
{
        return atomic64_read(&c_entity->last_queued_v_seq) + 1;
}
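/*
 * Note: last_queued_v_seq is seeded with (ring_id << 60) in
 * amd_sched_entity_init(), so the value returned above carries the ring id
 * in its top bits while the low bits count jobs queued on this entity. A
 * caller might use it like this (hypothetical snippet, not part of this
 * file):
 *
 *	uint64_t v_seq = amd_sched_next_queued_seq(&ctx_entity);
 *	atomic64_set(&ctx_entity.last_queued_v_seq, v_seq);
 */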