/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity; returns NULL if none is found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

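	/*
	 * Nothing ready after current_entity; wrap around and scan from the
	 * head of the list up to and including the previous current_entity.
	 */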
	list_for_each_entry(entity, &rq->entities, list) {

		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
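	/*
	 * Two fence contexts per entity: one for the "scheduled" fences and
	 * one for the "finished" fences of its scheduler fences.
	 */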
	entity->fence_context = fence_context_alloc(2);

	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity does not have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
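	/* Read barrier: pick up the latest job_queue state before checking it. */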
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourselves */
		fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = fence_get(&s_fence->scheduled);
		fence_put(entity->dependency);
		entity->dependency = fence;
		if (!fence_add_callback(fence, &entity->cb,
					amd_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		fence_put(fence);
		return false;
	}

	if (!fence_add_callback(entity->dependency, &entity->cb,
				amd_sched_entity_wakeup))
		return true;

	fence_put(entity->dependency);
	return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

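	/*
	 * Keep fetching unsatisfied dependencies from the backend; stop and
	 * return NULL as soon as a wait callback had to be installed.
	 */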
	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
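	/*
	 * kfifo_in() returns the number of bytes queued; treat anything short
	 * of a full pointer as "queue full".
	 */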
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}

/*
 * amd_sched_job_finish is called from a work item after the job's hw fence
 * has signaled; it removes the job from the ring_mirror_list and frees it.
 */
static void amd_sched_job_finish(struct work_struct *work)
{
	struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
						   finish_work);
	struct amd_gpu_scheduler *sched = s_job->sched;

	/* remove job from ring_mirror_list */
	spin_lock(&sched->job_list_lock);
	list_del_init(&s_job->node);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		struct amd_sched_job *next;

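		/*
		 * Drop the lock: cancel_delayed_work_sync() may sleep while
		 * waiting for a running timeout handler to finish.
		 */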
		spin_unlock(&sched->job_list_lock);
		cancel_delayed_work_sync(&s_job->work_tdr);
		spin_lock(&sched->job_list_lock);

		/* queue TDR for next job */
		next = list_first_entry_or_null(&sched->ring_mirror_list,
						struct amd_sched_job, node);

		if (next)
			schedule_delayed_work(&next->work_tdr, sched->timeout);
	}
	spin_unlock(&sched->job_list_lock);
	sched->ops->free_job(s_job);
}

static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
						 finish_cb);
	schedule_work(&job->finish_work);
}

static void amd_sched_job_begin(struct amd_sched_job *s_job)
{
	struct amd_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->node, &sched->ring_mirror_list);
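	/*
	 * Arm the timeout handler only if this job is now the oldest one on
	 * the ring, i.e. sits at the head of the mirror list.
	 */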
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    list_first_entry_or_null(&sched->ring_mirror_list,
				     struct amd_sched_job, node) == s_job)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);
	spin_unlock(&sched->job_list_lock);
}

static void amd_sched_job_timedout(struct work_struct *work)
{
	struct amd_sched_job *job = container_of(work, struct amd_sched_job,
						 work_tdr.work);

	job->sched->ops->timedout_job(job);
}

void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job;

	spin_lock(&sched->job_list_lock);
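	/*
	 * Detach the hw fence callback from every pending job so a late
	 * signal cannot complete it during the reset; amd_sched_job_recovery()
	 * re-submits the jobs and installs fresh parent fences.
	 */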
	list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
		if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
			fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
		}
	}
	atomic_set(&sched->hw_rq_count, 0);
	spin_unlock(&sched->job_list_lock);
}

void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *s_job, *tmp;
	int r;

	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
		struct amd_sched_fence *s_fence = s_job->s_fence;
		struct fence *fence;

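		/*
		 * run_job() is called without job_list_lock held, so drop it
		 * across the re-submission.
		 */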
		spin_unlock(&sched->job_list_lock);
		fence = sched->ops->run_job(s_job);
		atomic_inc(&sched->hw_rq_count);
		if (fence) {
			s_fence->parent = fence_get(fence);
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}
		spin_lock(&sched->job_list_lock);
	}
	spin_unlock(&sched->job_list_lock);
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The job to submit
 *
 * Blocks until the job has been pushed into the entity's job queue; the first
 * queued job also wakes up the scheduler.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
			   amd_sched_job_finish_cb);
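	/*
	 * Retry amd_sched_entity_in() every time a job is scheduled until
	 * there is room in the entity's kfifo for this job.
	 */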
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}

/* init a sched_job with basic fields */
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner)
{
	job->sched = sched;
	job->s_entity = entity;
	job->s_fence = amd_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_WORK(&job->finish_work, amd_sched_job_finish);
	INIT_LIST_HEAD(&job->node);
	INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);

	return 0;
}

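/*
 * Typical submission flow as seen from a driver (illustrative sketch only;
 * the job allocation, "ring->sched", "ctx_entity" and error handling below
 * are assumptions, not code from this file):
 *
 *	struct amd_sched_job *job = kzalloc(sizeof(*job), GFP_KERNEL);
 *
 *	r = amd_sched_job_init(job, &ring->sched, &ctx_entity, owner);
 *	if (r)
 *		return r;
 *	amd_sched_entity_push_job(job);
 *
 * The scheduler thread later pops the job, resolves its dependencies via
 * ops->dependency(), runs it via ops->run_job() and finally frees it via
 * ops->free_job() once the hardware fence has signaled.
 */
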
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;

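	/*
	 * A hw submission slot is free again: signal the job's "finished"
	 * fence and kick the scheduler thread.
	 */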
	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_finished(s_fence);

	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity = NULL;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;

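		/*
		 * Sleep until an entity with a ready job is available, or we
		 * are asked to park or stop.
		 */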
		wait_event_interruptible(sched->wake_up_worker,
					 (!amd_sched_blocked(sched) &&
					  (entity = amd_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		amd_sched_job_begin(sched_job);

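		/*
		 * Hand the job to the hardware; the "scheduled" fence tells
		 * dependent entities on the same scheduler that this job has
		 * left the software queue.
		 */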
		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			s_fence->parent = fence_get(fence);
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

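		/*
		 * The job was only peeked in amd_sched_entity_pop_job();
		 * remove it from the entity's queue now and wake up anyone
		 * waiting in amd_sched_entity_push_job().
		 */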
		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	The max number of hw submissions that can be in flight.
 * @timeout		Job timeout in jiffies; ops->timedout_job() is called
 *			when a job takes longer than this (MAX_SCHEDULE_TIMEOUT
 *			disables the check).
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->ring_mirror_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
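	/*
	 * The scheduler fence slab is shared by all scheduler instances;
	 * create it when the first instance is initialized.
	 */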
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}