/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
/**
 * Select the next entity from a run queue with round robin policy.
 * It may return the same entity as the current one if that is the only
 * available one in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	/* Resume the scan after the last scheduled entity */
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (!kfifo_is_empty(&entity->job_queue)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return rq->current_entity;
			}
		}
	}

	/* Wrap around, scanning from the head up to the starting point */
	list_for_each_entry(entity, &rq->entities, list) {
		if (!kfifo_is_empty(&entity->job_queue)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return rq->current_entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}
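/*
 * Illustration of the round robin scan above, assuming a hypothetical run
 * queue holding entities A, B and C with rq->current_entity == B: the first
 * loop continues past B and checks C; the wrap-around loop then checks A and
 * finally B itself before breaking. Every entity is therefore considered
 * exactly once per call, and B only wins again if it is the sole entity with
 * queued jobs.
 */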
/**
 * Init a context entity used by scheduler when submit to HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	entity->belongto_rq = rq;
	entity->scheduler = sched;
	init_waitqueue_head(&entity->wait_queue);
	entity->fence_context = fence_context_alloc(1);
	if (kfifo_alloc(&entity->job_queue,
			jobs * sizeof(void *),
			GFP_KERNEL))
		return -EINVAL;

	spin_lock_init(&entity->queue_lock);
	atomic_set(&entity->fence_seq, 0);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);
	return 0;
}
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->scheduler == sched &&
		entity->belongto_rq != NULL;
}
/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	/* serialize with the submission side before peeking at the fifo */
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}
/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->belongto_rq;
	long r;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return 0;

	/*
	 * The client will not queue more IBs during this fini, consume existing
	 * queued IBs
	 */
	r = wait_event_timeout(entity->wait_queue,
			       amd_sched_entity_is_idle(entity),
			       msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS));
	if (!r)
		DRM_INFO("Entity %p is in waiting state during fini\n",
			 entity);

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
	/* timed out means the entity still had queued jobs */
	return r ? 0 : -EBUSY;
}
/**
 * Helper to submit a job to the job queue
 *
 * @job		The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *job)
{
	struct amd_sched_entity *entity = job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		wake_up_interruptible(&job->sched->wait_queue);

	return added;
}
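/*
 * Note on the wakeup above: only the transition from an empty to a non-empty
 * queue needs to wake the scheduler thread, because once the thread is
 * running it keeps draining jobs via amd_sched_select_context() until every
 * run queue is empty again.
 */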
/**
 * Submit a job to the job queue
 *
 * @sched_job	The pointer to the job to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(
		entity, sched_job->owner);
	int r;

	if (!fence)
		return -ENOMEM;

	fence_get(&fence->base);
	sched_job->s_fence = fence;

	r = wait_event_interruptible(entity->wait_queue,
				     amd_sched_entity_in(sched_job));

	return r;
}
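/*
 * A minimal submission sketch (hypothetical caller, not part of this file):
 * the driver fills in the job's scheduler, entity and owner, then lets
 * push_job block until amd_sched_entity_in() finds room in the entity's
 * kfifo.
 *
 *	job->sched = sched;
 *	job->s_entity = &entity;
 *	job->owner = owner;
 *	r = amd_sched_entity_push_job(job);
 *	if (r)
 *		return r;
 */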
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}
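/*
 * hw_rq_count is incremented when a job is handed to the hardware in
 * amd_sched_main() and decremented again in amd_sched_process_job(), so the
 * check above caps the number of jobs in flight at hw_submission_limit.
 */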
/**
 * Select the next entity containing real IB submissions
 */
static struct amd_sched_entity *
amd_sched_select_context(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *tmp;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	tmp = amd_sched_rq_select_entity(&sched->kernel_rq);
	if (tmp == NULL)
		tmp = amd_sched_rq_select_entity(&sched->sched_rq);

	return tmp;
}
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *sched_job =
		container_of(cb, struct amd_sched_job, cb);
	struct amd_gpu_scheduler *sched;

	sched = sched_job->sched;
	amd_sched_fence_signal(sched_job->s_fence);
	atomic_dec(&sched->hw_rq_count);
	fence_put(&sched_job->s_fence->base);
	sched->ops->process_job(sched, sched_job);
	wake_up_interruptible(&sched->wait_queue);
}
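/*
 * Completion path summary: the hardware fence callback above signals the
 * job's scheduler fence for any waiters, releases one hw submission slot,
 * lets the backend clean up via ops->process_job and wakes the scheduler
 * thread so it can push the next job.
 */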
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *c_entity = NULL;
		struct amd_sched_job *job;
		struct fence *fence;

		wait_event_interruptible(sched->wait_queue,
					 kthread_should_stop() ||
					 (c_entity = amd_sched_select_context(sched)));

		if (!c_entity)
			continue;

		r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
		if (r != sizeof(void *))
			continue;

		atomic_inc(&sched->hw_rq_count);

		fence = sched->ops->run_job(sched, c_entity, job);
		if (fence) {
			r = fence_add_callback(fence, &job->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				/* fence already signaled, run the callback now */
				amd_sched_process_job(fence, &job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			fence_put(fence);
		}

		wake_up(&c_entity->wait_queue);
	}
	return 0;
}
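/*
 * Loop shape above: block until there is both a ready entity and a free hw
 * slot, pop one job pointer from the winning entity's kfifo, hand it to the
 * backend, and hook amd_sched_process_job onto the returned hardware fence.
 * The final wake_up lets any amd_sched_entity_push_job() waiter retry now
 * that the fifo has room.
 */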
/**
 * Create a gpu scheduler
 *
 * @ops		The backend operations for this scheduler.
 * @ring	The ring id for the scheduler.
 * @hw_submission	Number of hw submissions to do.
 *
 * Return the pointer to the scheduler on success, otherwise return NULL.
 */
struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
					   unsigned ring, unsigned hw_submission)
{
	struct amd_gpu_scheduler *sched;

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->ops = ops;
	sched->ring_id = ring;
	sched->hw_submission_limit = hw_submission;
	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wait_queue);
	atomic_set(&sched->hw_rq_count, 0);
	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
		kfree(sched);
		return NULL;
	}

	return sched;
}
/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 *
 * Return 0 if succeeded, -1 if failed.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);

	return 0;
}