/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"

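/* Overview (summarising the code below): waiters for a request ("breadcrumb")
 * are kept in a per-engine rbtree ordered by seqno. Only the oldest waiter,
 * the "bottom-half", has the user interrupt enabled and is woken directly by
 * the irq handler; it is then responsible for waking any other completed
 * waiters. A dedicated signaler kthread acts as a waiter on behalf of
 * dma-buf/fence signaling.
 */
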
static void intel_breadcrumbs_fake_irq(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

	/*
	 * The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome). Here the worker will wake up
	 * every jiffie in order to kick the oldest waiter to do the
	 * coherent seqno check.
	 */
	rcu_read_lock();
	if (intel_engine_wakeup(engine))
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
	rcu_read_unlock();
}

static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	engine->breadcrumbs.irq_posted = true;

	/* Make sure the current hangcheck doesn't falsely accuse a just
	 * started irq handler of missing an interrupt (because the
	 * interrupt count still matches the stale value from when
	 * the irq handler was disabled, many hangchecks ago).
	 */
	engine->breadcrumbs.irq_wakeups++;

	spin_lock_irq(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock_irq(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
	spin_lock_irq(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock_irq(&engine->i915->irq_lock);

	engine->breadcrumbs.irq_posted = false;
}

static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	assert_spin_locked(&b->lock);
	if (b->rpm_wakelock)
		return;

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. For completeness,
	 * record an rpm reference for ourselves to cover the
	 * interrupt we unmask.
	 */
	intel_runtime_pm_get_noresume(i915);
	b->rpm_wakelock = true;

	/* No interrupts? Kick the waiter every jiffie! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	if (!b->irq_enabled ||
	    test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
		mod_timer(&b->fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a gpu hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation!
	 */
	i915_queue_hangcheck(i915);
}

static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	assert_spin_locked(&b->lock);
	if (!b->rpm_wakelock)
		return;

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	intel_runtime_pm_put(engine->i915);
	b->rpm_wakelock = false;
}

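/* Convert an rbtree node back to the intel_wait that embeds it. */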
static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return container_of(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	assert_spin_locked(&b->lock);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold
	 * the spinlock, we know that the first_waiter must be delayed, and
	 * we can reduce some of the sequential wake up latency if we take
	 * action ourselves and wake up the completed tasks in parallel.
	 * Also, by removing stale elements in the tree, we may be able to
	 * reduce the ping-pong between the old bottom-half and ourselves
	 * as first-waiter.
	 */
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);
	GEM_BUG_ON(!first && !b->irq_seqno_bh);

	if (completed) {
		struct rb_node *next = rb_next(completed);

		GEM_BUG_ON(!next && !first);
		if (next && next != &wait->node) {
			GEM_BUG_ON(first);
			b->first_wait = to_wait(next);
			smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
			/* As there is a delay between reading the current
			 * seqno, processing the completed tasks and selecting
			 * the next waiter, we may have missed the interrupt
			 * and so we need the next bottom-half to wake up.
			 *
			 * Also as we enable the IRQ, we may miss the
			 * interrupt for that seqno, so we have to wake up
			 * the next bottom-half in order to do a coherent check
			 * in case the seqno passed.
			 */
			__intel_breadcrumbs_enable_irq(b);
			if (READ_ONCE(b->irq_posted))
				wake_up_process(to_wait(next)->tsk);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	if (first) {
		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
		b->first_wait = wait;
		smp_store_mb(b->irq_seqno_bh, wait->tsk);
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		__intel_breadcrumbs_enable_irq(b);
	}
	GEM_BUG_ON(!b->irq_seqno_bh);
	GEM_BUG_ON(!b->first_wait);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

	return first;
}

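/* Add a waiter into the engine's rbtree of breadcrumbs, taking b->lock.
 * Returns true if the waiter was added as the first (oldest) waiter, or if
 * its seqno had already completed before insertion; in either case the
 * caller should perform its own coherent seqno check.
 */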
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool first;

	spin_lock(&b->lock);
	first = __intel_engine_add_wait(engine, wait);
	spin_unlock(&b->lock);

	return first;
}

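/* Arm the fake-irq timer so the oldest waiter is periodically kicked to
 * re-check the seqno even if no user interrupt arrives.
 */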
void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
{
	mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}

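/* Continue waking waiters along the chain only while they are at least as
 * important (smaller or equal task->prio) as the departing waiter, so that
 * a high priority waiter (or the signaler) does not incur extra latency
 * waking a herd of low priority clients.
 */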
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}

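/* The signaler is treated as the highest possible priority so that it never
 * stalls itself waking a chain of waiters; everyone else uses task->prio.
 */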
static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}

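/* Remove a waiter from the rbtree. If it was the current bottom-half, pick
 * and wake the next oldest waiter so that it can take over the coherent
 * seqno checking, and disable the interrupt once no waiters remain.
 */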
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node))
		return;

	spin_lock(&b->lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out_unlock;

	if (b->first_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in
			 * parallel rather than sequentially, which should
			 * reduce the overall latency in waking all the
			 * completed clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		if (next) {
			/* In our haste, we may have completed the first waiter
			 * before we enabled the interrupt. Do so now as we
			 * have a second waiter for a future seqno. Afterwards,
			 * we have to wake up that waiter in case we missed
			 * the interrupt, or if we have to handle an
			 * exception rather than a seqno completion.
			 */
			b->first_wait = to_wait(next);
			smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
			if (b->first_wait->seqno != wait->seqno)
				__intel_breadcrumbs_enable_irq(b);
			wake_up_process(b->irq_seqno_bh);
		} else {
			b->first_wait = NULL;
			WRITE_ONCE(b->irq_seqno_bh, NULL);
			__intel_breadcrumbs_disable_irq(b);
		}
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);

out_unlock:
	GEM_BUG_ON(b->first_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->first_wait ? &b->first_wait->node : NULL));
	GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
	spin_unlock(&b->lock);
}

static bool signal_complete(struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may have already
	 * signalled that this wait is already completed.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return true;

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}

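/* Convert an rbtree node in b->signals back to the request that embeds it. */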
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return container_of(rb, struct drm_i915_gem_request, signaling.node);
}

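/* Run the signaler as a realtime (SCHED_FIFO) task to minimise the latency
 * between the user interrupt and the fence being signalled.
 */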
static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		request = READ_ONCE(b->first_signal);
		if (signal_complete(request)) {
			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			intel_engine_remove_wait(engine,
						 &request->signaling.wait);
			fence_signal(&request->fence);

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			spin_lock(&b->lock);
			if (request == b->first_signal) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				b->first_signal = rb ? to_signaler(rb) : NULL;
			}
			rb_erase(&request->signaling.node, &b->signals);
			spin_unlock(&b->lock);

			i915_gem_request_put(request);
		} else {
			if (kthread_should_stop())
				break;

			schedule();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}

void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *parent, **p;
	bool first, wakeup;

	/* locked by fence_enable_sw_signaling() */
	assert_spin_locked(&request->lock);

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.seqno = request->fence.seqno;
	i915_gem_request_get(request);

	spin_lock(&b->lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

	/* Now insert ourselves into the retirement ordered list of signals
	 * on this engine. We track the oldest seqno as that will be the
	 * first signal to complete.
	 */
	parent = NULL;
	first = true;
	p = &b->signals.rb_node;
	while (*p) {
		parent = *p;
		if (i915_seqno_passed(request->fence.seqno,
				      to_signaler(parent)->fence.seqno)) {
			p = &parent->rb_right;
			first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&request->signaling.node, parent, p);
	rb_insert_color(&request->signaling.node, &b->signals);
	if (first)
		smp_store_mb(b->first_signal, request);

	spin_unlock(&b->lock);

	if (wakeup)
		wake_up_process(b->signaler);
}

int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->lock);
	setup_timer(&b->fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}

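/* Tear down the per-engine breadcrumbs: stop the signaler kthread and make
 * sure the fake-irq timer is no longer running.
 */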
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	del_timer_sync(&b->fake_irq);
}

unsigned int intel_kick_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int mask = 0;

	/* To avoid the task_struct disappearing beneath us as we wake up
	 * the process, we must first inspect the task_struct->state under the
	 * RCU lock, i.e. as we call wake_up_process() we must be holding the
	 * rcu_read_lock().
	 */
	rcu_read_lock();
	for_each_engine(engine, i915)
		if (unlikely(intel_engine_wakeup(engine)))
			mask |= intel_engine_flag(engine);
	rcu_read_unlock();

	return mask;
}

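/* Wake the signaler kthread on every engine that still has a pending signal,
 * returning a mask of the engines kicked.
 */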
unsigned int intel_kick_signalers(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int mask = 0;

	for_each_engine(engine, i915) {
		if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
			wake_up_process(engine->breadcrumbs.signaler);
			mask |= intel_engine_flag(engine);
		}
	}

	return mask;
}