/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>

#include "i915_drv.h"

static void intel_breadcrumbs_fake_irq(unsigned long data)
{
	struct intel_engine_cs *engine = (struct intel_engine_cs *)data;

	/*
	 * The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome). Here the worker will wake up
	 * every jiffie in order to kick the oldest waiter to do the
	 * coherent seqno check.
	 */
	if (intel_engine_wakeup(engine))
		mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}

static void irq_enable(struct intel_engine_cs *engine)
{
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 * just in case.
	 */
	engine->breadcrumbs.irq_posted = true;

	spin_lock_irq(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock_irq(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
	spin_lock_irq(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock_irq(&engine->i915->irq_lock);

	engine->breadcrumbs.irq_posted = false;
}

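/* Must be called with b->lock held. Grabs a runtime-pm wakeref, unmasks the
 * user interrupt (or falls back to the fake-irq timer) and queues hangcheck
 * so that a wedged GPU still wakes the waiter eventually.
 */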
static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	assert_spin_locked(&b->lock);
	if (b->rpm_wakelock)
		return;

	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. For completeness,
	 * record an rpm reference for ourselves to cover the
	 * interrupt we unmask.
	 */
	intel_runtime_pm_get_noresume(i915);
	b->rpm_wakelock = true;

	/* No interrupts? Kick the waiter every jiffie! */
	if (intel_irqs_enabled(i915)) {
		if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
			irq_enable(engine);
		b->irq_enabled = true;
	}

	if (!b->irq_enabled ||
	    test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
		mod_timer(&b->fake_irq, jiffies + 1);

	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a gpu hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation!
	 */
	i915_queue_hangcheck(i915);
}

static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
{
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	assert_spin_locked(&b->lock);
	if (!b->rpm_wakelock)
		return;

	if (b->irq_enabled) {
		irq_disable(engine);
		b->irq_enabled = false;
	}

	intel_runtime_pm_put(engine->i915);
	b->rpm_wakelock = false;
}

static inline struct intel_wait *to_wait(struct rb_node *node)
{
	return container_of(node, struct intel_wait, node);
}

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
{
	assert_spin_locked(&b->lock);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
}

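/* Add @wait to the rbtree of waiters, pruning (and waking) any waiters that
 * have already completed on the way down. Returns true if the caller should
 * perform its own coherent seqno check, i.e. it is now the oldest waiter
 * (the interrupt bottom-half) or its seqno has already been passed.
 */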
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	bool first;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold
	 * the spinlock, we know that the first_waiter must be delayed, and
	 * we can reduce some of the sequential wake-up latency if we take
	 * action ourselves and wake up the completed tasks in parallel. Also,
	 * by removing stale elements in the tree, we may be able to reduce
	 * the ping-pong between the old bottom-half and ourselves as
	 * first-waiter.
	 */
	first = true;
	parent = NULL;
	completed = NULL;
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return first;
	}

	p = &b->waiters.rb_node;
	while (*p) {
		parent = *p;
		if (wait->seqno == to_wait(parent)->seqno) {
			/* We have multiple waiters on the same seqno, select
			 * the highest priority task (that with the smallest
			 * task->prio) to serve as the bottom-half for this
			 * group.
			 */
			if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
				p = &parent->rb_right;
				first = false;
			} else {
				p = &parent->rb_left;
			}
		} else if (i915_seqno_passed(wait->seqno,
					     to_wait(parent)->seqno)) {
			p = &parent->rb_right;
			if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
				completed = parent;
			else
				first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);
	GEM_BUG_ON(!first && !b->irq_seqno_bh);

	if (completed) {
		struct rb_node *next = rb_next(completed);

		GEM_BUG_ON(!next && !first);
		if (next && next != &wait->node) {
			GEM_BUG_ON(first);
			b->first_wait = to_wait(next);
			smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
			/* As there is a delay between reading the current
			 * seqno, processing the completed tasks and selecting
			 * the next waiter, we may have missed the interrupt
			 * and so need the next bottom-half to wake up.
			 *
			 * Also as we enable the IRQ, we may miss the
			 * interrupt for that seqno, so we have to wake up
			 * the next bottom-half in order to do a coherent check
			 * in case the seqno passed.
			 */
			__intel_breadcrumbs_enable_irq(b);
			if (READ_ONCE(b->irq_posted))
				wake_up_process(to_wait(next)->tsk);
		}

		do {
			struct intel_wait *crumb = to_wait(completed);
			completed = rb_prev(completed);
			__intel_breadcrumbs_finish(b, crumb);
		} while (completed);
	}

	if (first) {
		GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
		b->first_wait = wait;
		smp_store_mb(b->irq_seqno_bh, wait->tsk);
		/* After assigning ourselves as the new bottom-half, we must
		 * perform a cursory check to prevent a missed interrupt.
		 * Either we miss the interrupt whilst programming the hardware,
		 * or if there was a previous waiter (for a later seqno) they
		 * may be woken instead of us (due to the inherent race
		 * in the unlocked read of b->irq_seqno_bh in the irq handler)
		 * and so we miss the wake up.
		 */
		__intel_breadcrumbs_enable_irq(b);
	}
	GEM_BUG_ON(!b->irq_seqno_bh);
	GEM_BUG_ON(!b->first_wait);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);

	return first;
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool first;

	spin_lock(&b->lock);
	first = __intel_engine_add_wait(engine, wait);
	spin_unlock(&b->lock);

	return first;
}

void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
{
	mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
}

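/* Should a chained wakeup continue onto @rb? Note the kernel convention that
 * a numerically lower task->prio means a higher priority task.
 */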
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
	return rb && to_wait(rb)->tsk->prio <= priority;
}

static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
{
	if (tsk == b->signaler)
		return INT_MIN;
	else
		return tsk->prio;
}

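/* Remove @wait from the rbtree of waiters. If @wait was the current
 * bottom-half, hand over to the next oldest waiter (waking any that have
 * already completed), or disable the interrupt if no waiters remain.
 */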
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 * by the herd.
	 */
	if (RB_EMPTY_NODE(&wait->node))
		return;

	spin_lock(&b->lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out_unlock;

	if (b->first_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in
			 * parallel rather than sequentially, which should
			 * reduce the overall latency in waking all the
			 * completed clients.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		if (next) {
			/* In our haste, we may have completed the first waiter
			 * before we enabled the interrupt. Do so now as we
			 * have a second waiter for a future seqno. Afterwards,
			 * we have to wake up that waiter in case we missed
			 * the interrupt, or if we have to handle an
			 * exception rather than a seqno completion.
			 */
			b->first_wait = to_wait(next);
			smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
			if (b->first_wait->seqno != wait->seqno)
				__intel_breadcrumbs_enable_irq(b);
			wake_up_process(b->irq_seqno_bh);
		} else {
			b->first_wait = NULL;
			WRITE_ONCE(b->irq_seqno_bh, NULL);
			__intel_breadcrumbs_disable_irq(b);
		}
	} else {
		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
	}

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);

out_unlock:
	GEM_BUG_ON(b->first_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->first_wait ? &b->first_wait->node : NULL));
	GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
	spin_unlock(&b->lock);
}

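/* Has @request completed, either as observed by its waiter or by performing
 * a coherent seqno check ourselves?
 */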
static bool signal_complete(struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

	/* If another process served as the bottom-half it may have already
	 * signalled that this wait is complete.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return true;

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

	return false;
}

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
	return container_of(rb, struct drm_i915_gem_request, signaling.node);
}

static void signaler_set_rtpriority(void)
{
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}

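/* Per-engine signaler kthread: acts as a common bottom-half for all requests
 * that requested signaling, processing them in seqno order and dropping the
 * reference taken in intel_engine_enable_signaling() once each completes.
 */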
static int intel_breadcrumbs_signaler(void *arg)
{
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		/* We are either woken up by the interrupt bottom-half,
		 * or by a client adding a new signaller. In both cases,
		 * the GPU seqno may have advanced beyond our oldest signal.
		 * If it has, propagate the signal, remove the waiter and
		 * check again with the next oldest signal. Otherwise we
		 * need to wait for a new interrupt from the GPU or for
		 * a new client.
		 */
		request = READ_ONCE(b->first_signal);
		if (signal_complete(request)) {
			/* Wake up all other completed waiters and select the
			 * next bottom-half for the next user interrupt.
			 */
			intel_engine_remove_wait(engine,
						 &request->signaling.wait);

			/* Find the next oldest signal. Note that as we have
			 * not been holding the lock, another client may
			 * have installed an even older signal than the one
			 * we just completed - so double check we are still
			 * the oldest before picking the next one.
			 */
			spin_lock(&b->lock);
			if (request == b->first_signal) {
				struct rb_node *rb =
					rb_next(&request->signaling.node);
				b->first_signal = rb ? to_signaler(rb) : NULL;
			}
			rb_erase(&request->signaling.node, &b->signals);
			spin_unlock(&b->lock);

			i915_gem_request_unreference(request);
		} else {
			if (kthread_should_stop())
				break;

			schedule();
		}
	} while (1);
	__set_current_state(TASK_RUNNING);

	return 0;
}

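/* Arrange for @request to be signalled upon completion: register the signaler
 * thread as a waiter on the request's seqno and insert the request into the
 * retirement ordered tree of pending signals.
 */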
void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *parent, **p;
	bool first, wakeup;

	if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
		return;

	spin_lock(&b->lock);
	if (unlikely(request->signaling.wait.tsk)) {
		wakeup = false;
		goto unlock;
	}

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.seqno = request->seqno;
	i915_gem_request_reference(request);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);

	/* Now insert ourselves into the retirement ordered list of signals
	 * on this engine. We track the oldest seqno as that will be the
	 * first signal to complete.
	 */
	parent = NULL;
	first = true;
	p = &b->signals.rb_node;
	while (*p) {
		parent = *p;
		if (i915_seqno_passed(request->seqno,
				      to_signaler(parent)->seqno)) {
			p = &parent->rb_right;
			first = false;
		} else {
			p = &parent->rb_left;
		}
	}
	rb_link_node(&request->signaling.node, parent, p);
	rb_insert_color(&request->signaling.node, &b->signals);
	if (first)
		smp_store_mb(b->first_signal, request);

unlock:
	spin_unlock(&b->lock);

	if (wakeup)
		wake_up_process(b->signaler);
}

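/* Per-engine initialisation: set up the waiter lock and fake-irq timer, and
 * spawn the signaler thread.
 */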
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->lock);
	setup_timer(&b->fake_irq,
		    intel_breadcrumbs_fake_irq,
		    (unsigned long)engine);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);

	b->signaler = tsk;

	return 0;
}

void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	del_timer_sync(&b->fake_irq);
}

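/* Wake the current bottom-half waiter on every engine, returning a mask of
 * the engines that had a waiter to wake.
 */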
unsigned int intel_kick_waiters(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int mask = 0;

	/* To avoid the task_struct disappearing beneath us as we wake up
	 * the process, we must first inspect the task_struct->state under the
	 * RCU lock, i.e. as we call wake_up_process() we must be holding the
	 * rcu_read_lock().
	 */
	rcu_read_lock();
	for_each_engine(engine, i915)
		if (unlikely(intel_engine_wakeup(engine)))
			mask |= intel_engine_flag(engine);
	rcu_read_unlock();

	return mask;
}

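/* Likewise, wake the signaler thread on every engine that still has pending
 * signals, returning a mask of the engines kicked.
 */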
unsigned int intel_kick_signalers(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	unsigned int mask = 0;

	for_each_engine(engine, i915) {
		if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
			wake_up_process(engine->breadcrumbs.signaler);
			mask |= intel_engine_flag(engine);
		}
	}

	return mask;
}