/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/prefetch.h>

#include "i915_drv.h"
static const char *i915_fence_get_driver_name(struct fence *fence)
{
        return "i915";
}
static const char *i915_fence_get_timeline_name(struct fence *fence)
{
        /* Timelines are bound by eviction to a VM. However, since
         * we only have a global seqno at the moment, we only have
         * a single timeline. Note that each timeline will have
         * multiple execution contexts (fence contexts) as we allow
         * engines within a single timeline to execute in parallel.
         */
        return "global";
}
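
/* A request is complete (and its fence signaled) once the GPU has passed
 * the request's seqno in the hardware status page.
 */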
static bool i915_fence_signaled(struct fence *fence)
{
        return i915_gem_request_completed(to_request(fence));
}
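
/* Hook the request into the breadcrumb interrupt machinery so that the
 * fence is signaled from the user interrupt rather than by polling.
 */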
static bool i915_fence_enable_signaling(struct fence *fence)
{
        if (i915_fence_signaled(fence))
                return false;

        intel_engine_enable_signaling(to_request(fence));
        return true;
}
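
/* The fence->ops->wait entry point: the core fence API works in jiffies,
 * while i915_wait_request() works in nanoseconds, so convert on the way in
 * and report any unconsumed time back to the caller on the way out.
 */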
static signed long i915_fence_wait(struct fence *fence,
                                   bool interruptible,
                                   signed long timeout_jiffies)
{
        s64 timeout_ns, *timeout;
        int ret;

        if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
                timeout_ns = jiffies_to_nsecs(timeout_jiffies);
                timeout = &timeout_ns;
        } else {
                timeout = NULL;
        }

        ret = i915_wait_request(to_request(fence),
                                interruptible, timeout,
                                NO_WAITBOOST);
        if (ret == -ETIME)
                return 0;

        if (ret < 0)
                return ret;

        if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
                timeout_jiffies = nsecs_to_jiffies(timeout_ns);

        return timeout_jiffies;
}
static void i915_fence_value_str(struct fence *fence, char *str, int size)
{
        snprintf(str, size, "%u", fence->seqno);
}
static void i915_fence_timeline_value_str(struct fence *fence, char *str,
                                          int size)
{
        snprintf(str, size, "%u",
                 intel_engine_get_seqno(to_request(fence)->engine));
}
static void i915_fence_release(struct fence *fence)
{
        struct drm_i915_gem_request *req = to_request(fence);

        kmem_cache_free(req->i915->requests, req);
}
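
/* The vtable binding the driver's requests to the generic struct fence. */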
const struct fence_ops i915_fence_ops = {
        .get_driver_name = i915_fence_get_driver_name,
        .get_timeline_name = i915_fence_get_timeline_name,
        .enable_signaling = i915_fence_enable_signaling,
        .signaled = i915_fence_signaled,
        .wait = i915_fence_wait,
        .release = i915_fence_release,
        .fence_value_str = i915_fence_value_str,
        .timeline_value_str = i915_fence_timeline_value_str,
};
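
/* Each request's embedded fence is bound to the ops above by fence_init()
 * in i915_gem_request_alloc() below.
 */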
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file)
{
        struct drm_i915_private *dev_private;
        struct drm_i915_file_private *file_priv;

        WARN_ON(!req || !file || req->file_priv);

        if (!req || !file)
                return -EINVAL;

        if (req->file_priv)
                return -EINVAL;

        dev_private = req->i915;
        file_priv = file->driver_priv;

        spin_lock(&file_priv->mm.lock);
        req->file_priv = file_priv;
        list_add_tail(&req->client_list, &file_priv->mm.request_list);
        spin_unlock(&file_priv->mm.lock);

        return 0;
}
static void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
        struct drm_i915_file_private *file_priv = request->file_priv;

        if (!file_priv)
                return;

        spin_lock(&file_priv->mm.lock);
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
}
void i915_gem_retire_noop(struct i915_gem_active *active,
                          struct drm_i915_gem_request *request)
{
        /* Space left intentionally blank */
}
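
/* Retiring a request releases all of its references: unlink it from the
 * engine and ring, run the ->retire() callback of every tracker on its
 * active list, and finally drop the context and request references.
 */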
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
        struct i915_gem_active *active, *next;

        trace_i915_gem_request_retire(request);
        list_del(&request->link);

        /* We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
         * of tail of the request to update the last known position
         * of the GPU head.
         *
         * Note this requires that we are always called in request
         * completion order.
         */
        list_del(&request->ring_link);
        request->ring->last_retired_head = request->postfix;

        /* Walk through the active list, calling retire on each. This allows
         * objects to track their GPU activity and mark themselves as idle
         * when their *last* active request is completed (updating state
         * tracking lists for eviction, active references for GEM, etc).
         *
         * As the ->retire() may free the node, we decouple it first and
         * pass along the auxiliary information (to avoid dereferencing
         * the node after the callback).
         */
        list_for_each_entry_safe(active, next, &request->active_list, link) {
                /* In microbenchmarks or focusing upon time inside the kernel,
                 * we may spend an inordinate amount of time simply handling
                 * the retirement of requests and processing their callbacks.
                 * Of which, this loop itself is particularly hot due to the
                 * cache misses when jumping around the list of i915_gem_active.
                 * So we try to keep this loop as streamlined as possible and
                 * also prefetch the next i915_gem_active to try and hide
                 * the likely cache miss.
                 */
                prefetchw(next);

                INIT_LIST_HEAD(&active->link);
                RCU_INIT_POINTER(active->request, NULL);

                active->retire(active, request);
        }

        i915_gem_request_remove_from_client(request);

        if (request->previous_context) {
                if (i915.enable_execlists)
                        intel_lr_context_unpin(request->previous_context,
                                               request->engine);
        }

        i915_gem_context_put(request->ctx);
        i915_gem_request_put(request);
}
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;

        lockdep_assert_held(&req->i915->drm.struct_mutex);
        GEM_BUG_ON(list_empty(&req->link));

        do {
                tmp = list_first_entry(&engine->request_list,
                                       typeof(*tmp), link);

                i915_gem_request_retire(tmp);
        } while (tmp != req);
}
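
/* Check the terminal state of the GPU before a client starts new work:
 * -EIO if wedged beyond recovery, -EAGAIN while a reset is in progress.
 */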
static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
        struct i915_gpu_error *error = &dev_priv->gpu_error;

        if (i915_terminally_wedged(error))
                return -EIO;

        if (i915_reset_in_progress(error)) {
                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these.
                 */
                if (!dev_priv->mm.interruptible)
                        return -EIO;

                return -EAGAIN;
        }

        return 0;
}
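
/* Idle the GPU and rewrite every engine's seqno so that the next request
 * continues from @seqno; used when the global seqno is about to wrap.
 */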
static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
        struct intel_engine_cs *engine;
        int ret;

        /* Carefully retire all requests without writing to the rings */
        for_each_engine(engine, dev_priv) {
                ret = intel_engine_idle(engine,
                                        I915_WAIT_INTERRUPTIBLE |
                                        I915_WAIT_LOCKED);
                if (ret)
                        return ret;
        }
        i915_gem_retire_requests(dev_priv);

        /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
        if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
                while (intel_kick_waiters(dev_priv) ||
                       intel_kick_signalers(dev_priv))
                        yield();
        }

        /* Finally reset hw state */
        for_each_engine(engine, dev_priv)
                intel_engine_init_seqno(engine, seqno);

        return 0;
}
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;

        if (seqno == 0)
                return -EINVAL;

        /* HWS page needs to be set less than what we
         * will inject to ring
         */
        ret = i915_gem_init_seqno(dev_priv, seqno - 1);
        if (ret)
                return ret;

        dev_priv->next_seqno = seqno;
        return 0;
}
static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
        /* reserve 0 for non-seqno */
        if (unlikely(dev_priv->next_seqno == 0)) {
                int ret;

                ret = i915_gem_init_seqno(dev_priv, 0);
                if (ret)
                        return ret;

                dev_priv->next_seqno = 1;
        }

        *seqno = dev_priv->next_seqno++;
        return 0;
}
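
/* Notification callback for the submit fence: once all dependencies of a
 * request have been resolved, pass it to the engine for execution.
 */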
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct drm_i915_gem_request *request =
                container_of(fence, typeof(*request), submit);

        /* Will be called from irq-context when using foreign DMA fences */

        switch (state) {
        case FENCE_COMPLETE:
                request->engine->submit_request(request);
                break;

        case FENCE_FREE:
                break;
        }

        return NOTIFY_DONE;
}
/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct i915_gem_context *ctx)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct drm_i915_gem_request *req;
        u32 seqno;
        int ret;

        /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
         * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
         * and restart.
         */
        ret = i915_gem_check_wedge(dev_priv);
        if (ret)
                return ERR_PTR(ret);

        /* Move the oldest request to the slab-cache (if not in use!) */
        req = list_first_entry_or_null(&engine->request_list,
                                       typeof(*req), link);
        if (req && i915_gem_request_completed(req))
                i915_gem_request_retire(req);

        /* Beware: Dragons be flying overhead.
         *
         * We use RCU to look up requests in flight. The lookups may
         * race with the request being allocated from the slab freelist.
         * That is the request we are writing to here, may be in the process
         * of being read by __i915_gem_active_get_rcu(). As such,
         * we have to be very careful when overwriting the contents. During
         * the RCU lookup, we chase the request->engine pointer,
         * read the request->fence.seqno and increment the reference count.
         *
         * The reference count is incremented atomically. If it is zero,
         * the lookup knows the request is unallocated and complete. Otherwise,
         * it is either still in use, or has been reallocated and reset
         * with fence_init(). This increment is safe for release as we check
         * that the request we have a reference to matches the active
         * request.
         *
         * Before we increment the refcount, we chase the request->engine
         * pointer. We must not call kmem_cache_zalloc() or else we set
         * that pointer to NULL and cause a crash during the lookup. If
         * we see the request is completed (based on the value of the
         * old engine and seqno), the lookup is complete and reports NULL.
         * If we decide the request is not completed (new engine or seqno),
         * then we grab a reference and double check that it is still the
         * active request - which it won't be and restart the lookup.
         *
         * Do not use kmem_cache_zalloc() here!
         */
        req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
        if (!req)
                return ERR_PTR(-ENOMEM);

        ret = i915_gem_get_seqno(dev_priv, &seqno);
        if (ret)
                goto err;

        spin_lock_init(&req->lock);
        fence_init(&req->fence,
                   &i915_fence_ops,
                   &req->lock,
                   engine->fence_context,
                   seqno);

        i915_sw_fence_init(&req->submit, submit_notify);

        INIT_LIST_HEAD(&req->active_list);
        req->i915 = dev_priv;
        req->engine = engine;
        req->ctx = i915_gem_context_get(ctx);

        /* No zalloc, must clear what we need by hand */
        req->previous_context = NULL;
        req->file_priv = NULL;

        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
         * i915_add_request() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         */
        req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

        if (i915.enable_execlists)
                ret = intel_logical_ring_alloc_request_extras(req);
        else
                ret = intel_ring_alloc_request_extras(req);
        if (ret)
                goto err_ctx;

        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
        req->head = req->ring->tail;

        return req;

err_ctx:
        i915_gem_context_put(ctx);
err:
        kmem_cache_free(dev_priv->requests, req);
        return ERR_PTR(ret);
}
static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
                               struct drm_i915_gem_request *from)
{
        int idx, ret;

        GEM_BUG_ON(to == from);

        if (to->engine == from->engine)
                return 0;

        idx = intel_engine_sync_index(from->engine, to->engine);
        if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
                return 0;

        trace_i915_gem_ring_sync_to(to, from);
        if (!i915.semaphores) {
                if (!i915_spin_request(from, TASK_INTERRUPTIBLE, 2)) {
                        ret = i915_sw_fence_await_dma_fence(&to->submit,
                                                            &from->fence, 0,
                                                            GFP_KERNEL);
                        if (ret < 0)
                                return ret;
                }
        } else {
                ret = to->engine->semaphore.sync_to(to, from);
                if (ret)
                        return ret;
        }

        from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
        return 0;
}
/**
 * i915_gem_request_await_object - set this request to (async) wait upon a bo
 *
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
                              struct drm_i915_gem_object *obj,
                              bool write)
{
        struct i915_gem_active *active;
        unsigned long active_mask;
        int idx;

        if (write) {
                active_mask = i915_gem_object_get_active(obj);
                active = obj->last_read;
        } else {
                active_mask = 1;
                active = &obj->last_write;
        }

        for_each_active(active_mask, idx) {
                struct drm_i915_gem_request *request;
                int ret;

                request = i915_gem_active_peek(&active[idx],
                                               &obj->base.dev->struct_mutex);
                if (!request)
                        continue;

                ret = i915_gem_request_await_request(to, request);
                if (ret)
                        return ret;
        }

        return 0;
}
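
/* Called for the first request on an idle GPU: take a runtime-pm wakeref,
 * re-enable powersaving autotuning and arm the retirement worker.
 */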
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        dev_priv->gt.active_engines |= intel_engine_flag(engine);
        if (dev_priv->gt.awake)
                return;

        intel_runtime_pm_get_noresume(dev_priv);
        dev_priv->gt.awake = true;

        intel_enable_gt_powersave(dev_priv);
        i915_update_gfx_val(dev_priv);
        if (INTEL_GEN(dev_priv) >= 6)
                gen6_rps_busy(dev_priv);

        queue_delayed_work(dev_priv->wq,
                           &dev_priv->gt.retire_work,
                           round_jiffies_up_relative(HZ));
}
/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_ring *ring = request->ring;
        struct drm_i915_gem_request *prev;
        u32 request_start;
        u32 reserved_tail;
        int ret;

        trace_i915_gem_request_add(request);

        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
        request_start = ring->tail;
        reserved_tail = request->reserved_space;
        request->reserved_space = 0;

        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
         * things up similar to emitting the lazy request. The difference here
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
        if (flush_caches) {
                ret = engine->emit_flush(request, EMIT_FLUSH);

                /* Not allowed to fail! */
                WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
        }

        /* Record the position of the start of the breadcrumb so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the ring's HEAD.
         */
        request->postfix = ring->tail;

        /* Not allowed to fail! */
        ret = engine->emit_request(request);
        WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);

        /* Sanity check that the reserved size was large enough. */
        ret = ring->tail - request_start;
        if (ret < 0)
                ret += ring->size;
        WARN_ONCE(ret > reserved_tail,
                  "Not enough space reserved (%d bytes) "
                  "for adding the request (%d bytes)\n",
                  reserved_tail, ret);

        /* Seal the request and mark it as pending execution. Note that
         * we may inspect this state, without holding any locks, during
         * hangcheck. Hence we apply the barrier to ensure that we do not
         * see a more recent value in the hws than we are tracking.
         */

        prev = i915_gem_active_raw(&engine->last_request,
                                   &request->i915->drm.struct_mutex);
        if (prev)
                i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
                                             &request->submitq);

        request->emitted_jiffies = jiffies;
        request->previous_seqno = engine->last_submitted_seqno;
        engine->last_submitted_seqno = request->fence.seqno;
        i915_gem_active_set(&engine->last_request, request);
        list_add_tail(&request->link, &engine->request_list);
        list_add_tail(&request->ring_link, &ring->request_list);

        i915_gem_mark_busy(engine);

        local_bh_disable();
        i915_sw_fence_commit(&request->submit);
        local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}
static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}
static unsigned long local_clock_us(unsigned int *cpu)
{
        unsigned long t;

        /* Cheaply and approximately convert from nanoseconds to microseconds.
         * The result and subsequent calculations are also defined in the same
         * approximate microseconds units. The principal source of timing
         * error here is from the simple truncation.
         *
         * Note that local_clock() is only defined wrt to the current CPU;
         * the comparisons are no longer valid if we switch CPUs. Instead of
         * blocking preemption for the entire busywait, we can detect the CPU
         * switch and use that as indicator of system load and a reason to
         * stop busywaiting, see busywait_stop().
         */
        *cpu = get_cpu();
        t = local_clock() >> 10;
        put_cpu();

        return t;
}
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
        unsigned int this_cpu;

        if (time_after(local_clock_us(&this_cpu), timeout))
                return true;

        return this_cpu != cpu;
}
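
/* Busywait for up to @timeout_us for the request to complete, giving up
 * early if we are rescheduled, signaled or migrated to another CPU.
 */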
bool __i915_spin_request(const struct drm_i915_gem_request *req,
                         int state, unsigned long timeout_us)
{
        unsigned int cpu;

        /* When waiting for high frequency requests, e.g. during synchronous
         * rendering split between the CPU and GPU, the finite amount of time
         * required to set up the irq and wait upon it limits the response
         * rate. By busywaiting on the request completion for a short while we
         * can service the high frequency waits as quick as possible. However,
         * if it is a slow request, we want to sleep as quickly as possible.
         * The tradeoff between waiting and sleeping is roughly the time it
         * takes to sleep on a request, on the order of a microsecond.
         */

        timeout_us += local_clock_us(&cpu);
        do {
                if (i915_gem_request_completed(req))
                        return true;

                if (signal_pending_state(state, current))
                        break;

                if (busywait_stop(timeout_us, cpu))
                        break;

                cpu_relax_lowlatency();
        } while (!need_resched());

        return false;
}
/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * @rps: client to charge for RPS boosting
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
int i915_wait_request(struct drm_i915_gem_request *req,
                      unsigned int flags,
                      s64 *timeout,
                      struct intel_rps_client *rps)
{
        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(reset);
        struct intel_wait wait;
        unsigned long timeout_remain;
        int ret = 0;

        might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
        GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
                   !!(flags & I915_WAIT_LOCKED));
#endif

        if (i915_gem_request_completed(req))
                return 0;

        timeout_remain = MAX_SCHEDULE_TIMEOUT;
        if (timeout) {
                if (WARN_ON(*timeout < 0))
                        return -EINVAL;

                if (*timeout == 0)
                        return -ETIME;

                /* Record current time in case interrupted, or wedged */
                timeout_remain = nsecs_to_jiffies_timeout(*timeout);
                *timeout += ktime_get_raw_ns();
        }

        trace_i915_gem_request_wait_begin(req);

        /* This client is about to stall waiting for the GPU. In many cases
         * this is undesirable and limits the throughput of the system, as
         * many clients cannot continue processing user input/output whilst
         * blocked. RPS autotuning may take tens of milliseconds to respond
         * to the GPU load and thus incurs additional latency for the client.
         * We can circumvent that by promoting the GPU frequency to maximum
         * before we wait. This makes the GPU throttle up much more quickly
         * (good for benchmarks and user experience, e.g. window animations),
         * but at a cost of spending more power processing the workload
         * (bad for battery). Not all clients even want their results
         * immediately and for them we should just let the GPU select its own
         * frequency to maximise efficiency. To prevent a single client from
         * forcing the clocks too high for the whole system, we only allow
         * each client to waitboost once in a busy period.
         */
        if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
                gen6_rps_boost(req->i915, rps, req->emitted_jiffies);

        /* Optimistic short spin before touching IRQs */
        if (i915_spin_request(req, state, 5))
                goto complete;

        set_current_state(state);
        if (flags & I915_WAIT_LOCKED)
                add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

        intel_wait_init(&wait, req->fence.seqno);
        if (intel_engine_add_wait(req->engine, &wait))
                /* In order to check that we haven't missed the interrupt
                 * as we enabled it, we need to kick ourselves to do a
                 * coherent check on the seqno before we sleep.
                 */
                goto wakeup;

        for (;;) {
                if (signal_pending_state(state, current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                timeout_remain = io_schedule_timeout(timeout_remain);
                if (timeout_remain == 0) {
                        ret = -ETIME;
                        break;
                }

                if (intel_wait_complete(&wait))
                        break;

                set_current_state(state);

wakeup:
                /* Carefully check if the request is complete, giving time
                 * for the seqno to be visible following the interrupt.
                 * We also have to check in case we are kicked by the GPU
                 * reset in order to drop the struct_mutex.
                 */
                if (__i915_request_irq_complete(req))
                        break;

                /* If the GPU is hung, and we hold the lock, reset the GPU
                 * and then check for completion. On a full reset, the engine's
                 * HW seqno will be advanced past us and we are complete.
                 * If we do a partial reset, we have to wait for the GPU to
                 * resume and update the breadcrumb.
                 *
                 * If we don't hold the mutex, we can just wait for the worker
                 * to come along and update the breadcrumb (either directly
                 * itself, or indirectly by recovering the GPU).
                 */
                if (flags & I915_WAIT_LOCKED &&
                    i915_reset_in_progress(&req->i915->gpu_error)) {
                        __set_current_state(TASK_RUNNING);
                        i915_reset(req->i915);
                        reset_wait_queue(&req->i915->gpu_error.wait_queue,
                                         &reset);
                        continue;
                }

                /* Only spin if we know the GPU is processing this request */
                if (i915_spin_request(req, state, 2))
                        break;
        }

        intel_engine_remove_wait(req->engine, &wait);
        if (flags & I915_WAIT_LOCKED)
                remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
        __set_current_state(TASK_RUNNING);

complete:
        trace_i915_gem_request_wait_end(req);

        if (timeout) {
                *timeout -= ktime_get_raw_ns();
                if (*timeout < 0)
                        *timeout = 0;

                /*
                 * Apparently ktime isn't accurate enough and occasionally has a
                 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
                 * things up to make the test happy. We allow up to 1 jiffy.
                 *
                 * This is a regression from the timespec->ktime conversion.
                 */
                if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
                        *timeout = 0;
        }

        if (IS_RPS_USER(rps) &&
            req->fence.seqno == req->engine->last_submitted_seqno) {
                /* The GPU is now idle and this client has stalled.
                 * Since no other client has submitted a request in the
                 * meantime, assume that this client is the only one
                 * supplying work to the GPU but is unable to keep that
                 * work supplied because it is waiting. Since the GPU is
                 * then never kept fully busy, RPS autoclocking will
                 * keep the clocks relatively low, causing further delays.
                 * Compensate by giving the synchronous client credit for
                 * a waitboost next time.
                 */
                spin_lock(&req->i915->rps.client_lock);
                list_del_init(&rps->link);
                spin_unlock(&req->i915->rps.client_lock);
        }

        return ret;
}
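
/* Retire completed requests in submission order, stopping at the first
 * incomplete one; returns true once the engine's request list is drained.
 */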
static bool engine_retire_requests(struct intel_engine_cs *engine)
{
        struct drm_i915_gem_request *request, *next;

        list_for_each_entry_safe(request, next, &engine->request_list, link) {
                if (!i915_gem_request_completed(request))
                        return false;

                i915_gem_request_retire(request);
        }

        return true;
}
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        unsigned int tmp;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (dev_priv->gt.active_engines == 0)
                return;

        GEM_BUG_ON(!dev_priv->gt.awake);

        for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
                if (engine_retire_requests(engine))
                        dev_priv->gt.active_engines &= ~intel_engine_flag(engine);

        if (dev_priv->gt.active_engines == 0)
                queue_delayed_work(dev_priv->wq,
                                   &dev_priv->gt.idle_work,
                                   msecs_to_jiffies(100));
}