drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

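/* Add an entity to the tail of the run queue's entity list (takes rq->lock) */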
static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

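/* Remove an entity from the run queue, resetting current_entity if needed */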
static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select the next job from a specified run queue with round robin policy.
 * Return NULL if nothing is available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;
	struct amd_sched_job *sched_job;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			sched_job = amd_sched_entity_pop_job(entity);
			if (sched_job) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return sched_job;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		sched_job = amd_sched_entity_pop_job(entity);
		if (sched_job) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return sched_job;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * Return 0 if succeeded, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);

	return 0;
}

/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity has no unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue up more IBs during this fini; consume
	 * the existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

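/* Fence callback: clear the entity's dependency and wake up the scheduler */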
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

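/*
 * Peek at the next job of an entity without removing it from the queue.
 * Returns NULL if the queue is empty or if the job still has an unsignaled
 * dependency; in the latter case a callback is installed so the scheduler
 * is woken up once the dependency signals.
 */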
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job))) {

		if (entity->dependency->context == entity->fence_context) {
			/* We can ignore fences from ourselves */
			fence_put(entity->dependency);
			continue;
		}

		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			fence_put(entity->dependency);
		else
			return NULL;
	}

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(sched_job->sched);

	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to the job to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(
		entity, sched_job->owner);

	if (!fence)
		return -ENOMEM;

	fence_get(&fence->base);
	sched_job->s_fence = fence;

	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
	trace_amd_sched_job(sched_job);
	return 0;
}

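/*
 * Illustrative entity lifecycle (a sketch only, not code from this file):
 * a driver typically creates one entity per context with
 * amd_sched_entity_init(), submits jobs through it, and tears it down once
 * no more jobs will be queued, roughly:
 *
 *	amd_sched_entity_init(&ring->sched, &ctx->entity,
 *			      &ring->sched.sched_rq, queue_depth);
 *	...
 *	job->s_entity = &ctx->entity;
 *	amd_sched_entity_push_job(job);
 *	...
 *	amd_sched_entity_fini(&ring->sched, &ctx->entity);
 *
 * "ring", "ctx" and "queue_depth" are hypothetical driver-side names used
 * only for illustration.
 */
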
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next job to run
 */
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *sched_job;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
	if (sched_job == NULL)
		sched_job = amd_sched_rq_select_job(&sched->sched_rq);

	return sched_job;
}

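/*
 * Hardware fence callback: signal the scheduler fence for a finished job,
 * drop it from the timeout list (if timeouts are enabled) and wake up the
 * scheduler thread so it can push the next job.
 */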
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	unsigned long flags;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		cancel_delayed_work(&s_fence->dwork);
		spin_lock_irqsave(&sched->fence_list_lock, flags);
		list_del_init(&s_fence->list);
		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
	}
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}

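/*
 * Delayed-work handler that fires when a submitted job has not signaled
 * within sched->timeout; it reports the timeout and drops all fences that
 * are still pending on this scheduler.
 */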
static void amd_sched_fence_work_func(struct work_struct *work)
{
	struct amd_sched_fence *s_fence =
		container_of(work, struct amd_sched_fence, dwork.work);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	struct amd_sched_fence *entity, *tmp;
	unsigned long flags;

	DRM_ERROR("[%s] scheduler timed out!\n", sched->name);

	/* Clean all pending fences */
	spin_lock_irqsave(&sched->fence_list_lock, flags);
	list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
		DRM_ERROR(" fence no %d\n", entity->base.seqno);
		cancel_delayed_work(&entity->dwork);
		list_del_init(&entity->list);
		fence_put(&entity->base);
	}
	spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}

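/*
 * Main loop of the scheduler kernel thread: wait until a job is available
 * and the hardware queue has room, optionally arm the timeout work, hand
 * the job to the backend's run_job callback and hook the returned hardware
 * fence up to amd_sched_process_job().
 */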
static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	spin_lock_init(&sched->fence_list_lock);
	INIT_LIST_HEAD(&sched->fence_list);
	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;
		unsigned long flags;

		wait_event_interruptible(sched->wake_up_worker,
			kthread_should_stop() ||
			(sched_job = amd_sched_select_job(sched)));

		if (!sched_job)
			continue;

		entity = sched_job->s_entity;
		s_fence = sched_job->s_fence;

		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
			INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
			schedule_delayed_work(&s_fence->dwork, sched->timeout);
			spin_lock_irqsave(&sched->fence_list_lock, flags);
			list_add_tail(&s_fence->list, &sched->fence_list);
			spin_unlock_irqrestore(&sched->fence_list_lock, flags);
		}

		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched The pointer to the scheduler
 * @ops The backend operations for this scheduler.
 * @hw_submission The max number of HW submissions that can be in flight
 * @timeout Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to disable it
 * @name Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

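/*
 * Driver-side setup sketch (illustrative only, not code from this file):
 * the backend supplies a struct amd_sched_backend_ops whose .dependency
 * and .run_job callbacks are invoked by amd_sched_main(), roughly:
 *
 *	static struct amd_sched_backend_ops my_ops = {
 *		.dependency = my_dependency_cb,
 *		.run_job    = my_run_job_cb,
 *	};
 *
 *	r = amd_sched_init(&ring->sched, &my_ops, hw_submission,
 *			   msecs_to_jiffies(timeout_ms), ring->name);
 *	...
 *	amd_sched_fini(&ring->sched);
 *
 * The full definition of amd_sched_backend_ops lives in gpu_scheduler.h;
 * the my_*, ring and timeout_ms names here are placeholders.
 */
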
/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}