/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>

#include "i915_drv.h"

static const char *i915_fence_get_driver_name(struct fence *fence)
{
        return "i915";
}

static const char *i915_fence_get_timeline_name(struct fence *fence)
{
        /* Timelines are bound by eviction to a VM. However, since
         * we only have a global seqno at the moment, we only have
         * a single timeline. Note that each timeline will have
         * multiple execution contexts (fence contexts) as we allow
         * engines within a single timeline to execute in parallel.
         */
        return "global";
}

static bool i915_fence_signaled(struct fence *fence)
{
        return i915_gem_request_completed(to_request(fence));
}

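/* Called by the fence core with fence->lock held. Returning false tells
 * the core that the fence has already signaled and nothing needs to be
 * armed; returning true hands signaling over to the breadcrumb machinery.
 */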
static bool i915_fence_enable_signaling(struct fence *fence)
{
        if (i915_fence_signaled(fence))
                return false;

        intel_engine_enable_signaling(to_request(fence));
        return true;
}

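/* The fence API expresses timeouts in jiffies, whereas i915_wait_request()
 * works in nanoseconds: convert the timeout on the way in and whatever
 * remains of it on the way out.
 */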
static signed long i915_fence_wait(struct fence *fence,
                                   bool interruptible,
                                   signed long timeout_jiffies)
{
        s64 timeout_ns, *timeout;
        int ret;

        if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
                timeout_ns = jiffies_to_nsecs(timeout_jiffies);
                timeout = &timeout_ns;
        } else {
                timeout = NULL;
        }

        ret = i915_wait_request(to_request(fence),
                                interruptible ? I915_WAIT_INTERRUPTIBLE : 0,
                                timeout, NO_WAITBOOST);
        if (ret == -ETIME)
                return 0;

        if (ret < 0)
                return ret;

        if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
                timeout_jiffies = nsecs_to_jiffies(timeout_ns);

        return timeout_jiffies;
}

static void i915_fence_value_str(struct fence *fence, char *str, int size)
{
        snprintf(str, size, "%u", fence->seqno);
}

static void i915_fence_timeline_value_str(struct fence *fence, char *str,
                                          int size)
{
        snprintf(str, size, "%u",
                 intel_engine_get_seqno(to_request(fence)->engine));
}

static void i915_fence_release(struct fence *fence)
{
        struct drm_i915_gem_request *req = to_request(fence);

        kmem_cache_free(req->i915->requests, req);
}

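/* The vtable that plugs i915 requests into the generic struct fence API. */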
const struct fence_ops i915_fence_ops = {
        .get_driver_name = i915_fence_get_driver_name,
        .get_timeline_name = i915_fence_get_timeline_name,
        .enable_signaling = i915_fence_enable_signaling,
        .signaled = i915_fence_signaled,
        .wait = i915_fence_wait,
        .release = i915_fence_release,
        .fence_value_str = i915_fence_value_str,
        .timeline_value_str = i915_fence_timeline_value_str,
};

int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;

        WARN_ON(!req || !file || req->file_priv);

        if (!req || !file)
                return -EINVAL;

        if (req->file_priv)
                return -EINVAL;

        file_priv = file->driver_priv;

        spin_lock(&file_priv->mm.lock);
        req->file_priv = file_priv;
        list_add_tail(&req->client_list, &file_priv->mm.request_list);
        spin_unlock(&file_priv->mm.lock);

        return 0;
}

static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
        struct drm_i915_file_private *file_priv = request->file_priv;

        if (!file_priv)
                return;

        spin_lock(&file_priv->mm.lock);
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
}

void i915_gem_retire_noop(struct i915_gem_active *active,
                          struct drm_i915_gem_request *request)
{
        /* Space left intentionally blank */
}

static void i915_gem_request_retire(struct drm_i915_gem_request *request)
{
        struct i915_gem_active *active, *next;

        trace_i915_gem_request_retire(request);
        list_del(&request->link);

        /* We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position of
         * the tail of the request to update the last known position
         * of the GPU head.
         *
         * Note this requires that we are always called in request
         * completion order.
         */
        list_del(&request->ring_link);
        request->ring->last_retired_head = request->postfix;

        /* Walk through the active list, calling retire on each. This allows
         * objects to track their GPU activity and mark themselves as idle
         * when their *last* active request is completed (updating state
         * tracking lists for eviction, active references for GEM, etc).
         *
         * As the ->retire() may free the node, we decouple it first and
         * pass along the auxiliary information (to avoid dereferencing
         * the node after the callback).
         */
        list_for_each_entry_safe(active, next, &request->active_list, link) {
                /* In microbenchmarks or focusing upon time inside the kernel,
                 * we may spend an inordinate amount of time simply handling
                 * the retirement of requests and processing their callbacks.
                 * In particular, this loop is hot due to the cache misses
                 * when jumping around the list of i915_gem_active.
                 * So we try to keep this loop as streamlined as possible and
                 * also prefetch the next i915_gem_active to try and hide
                 * the likely cache miss.
                 */
                prefetchw(next);

                INIT_LIST_HEAD(&active->link);
                RCU_INIT_POINTER(active->request, NULL);

                active->retire(active, request);
        }

        i915_gem_request_remove_from_client(request);

        if (request->previous_context) {
                if (i915.enable_execlists)
                        intel_lr_context_unpin(request->previous_context,
                                               request->engine);
        }

        i915_gem_context_put(request->ctx);
        i915_gem_request_put(request);
}

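/* Retire @req and every older request on the same engine. The engine's
 * request list is kept in submission (and so completion) order, so we
 * simply retire from the head of the list until we pass @req.
 */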
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;

        lockdep_assert_held(&req->i915->drm.struct_mutex);
        GEM_BUG_ON(list_empty(&req->link));

        do {
                tmp = list_first_entry(&engine->request_list,
                                       typeof(*tmp), link);

                i915_gem_request_retire(tmp);
        } while (tmp != req);
}

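/* Report the GPU error state to callers about to touch the GPU: -EIO if it
 * is terminally wedged, -EAGAIN while a reset is still in progress (-EIO
 * instead for non-interruptible callers that cannot retry), 0 otherwise.
 */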
static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
{
        struct i915_gpu_error *error = &dev_priv->gpu_error;

        if (i915_terminally_wedged(error))
                return -EIO;

        if (i915_reset_in_progress(error)) {
                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these.
                 */
                if (!dev_priv->mm.interruptible)
                        return -EIO;

                return -EAGAIN;
        }

        return 0;
}

static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
        struct intel_engine_cs *engine;
        int ret;

        /* Carefully retire all requests without writing to the rings */
        for_each_engine(engine, dev_priv) {
                ret = intel_engine_idle(engine,
                                        I915_WAIT_INTERRUPTIBLE |
                                        I915_WAIT_LOCKED);
                if (ret)
                        return ret;
        }
        i915_gem_retire_requests(dev_priv);

        /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
        if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
                while (intel_kick_waiters(dev_priv) ||
                       intel_kick_signalers(dev_priv))
                        yield();
        }

        /* Finally reset hw state */
        for_each_engine(engine, dev_priv)
                intel_engine_init_seqno(engine, seqno);

        return 0;
}

int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;

        if (seqno == 0)
                return -EINVAL;

        /* The HWS page needs to be set less than what we
         * will inject to the ring.
         */
        ret = i915_gem_init_seqno(dev_priv, seqno - 1);
        if (ret)
                return ret;

        dev_priv->next_seqno = seqno;
        return 0;
}

static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
        /* reserve 0 for non-seqno */
        if (unlikely(dev_priv->next_seqno == 0)) {
                int ret;

                ret = i915_gem_init_seqno(dev_priv, 0);
                if (ret)
                        return ret;

                dev_priv->next_seqno = 1;
        }

        *seqno = dev_priv->next_seqno++;
        return 0;
}

static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        struct drm_i915_gem_request *request =
                container_of(fence, typeof(*request), submit);

        /* Will be called from irq-context when using foreign DMA fences */

        switch (state) {
        case FENCE_COMPLETE:
                request->engine->submit_request(request);
                break;

        case FENCE_FREE:
                break;
        }

        return NOTIFY_DONE;
}

/**
 * i915_gem_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *       This can be NULL if the request is not directly related to
 *       any specific user context, in which case this function will
 *       choose an appropriate context to use.
 *
 * Returns a pointer to the allocated request if successful,
 * or an ERR_PTR-encoded error code if not.
 */
struct drm_i915_gem_request *
i915_gem_request_alloc(struct intel_engine_cs *engine,
                       struct i915_gem_context *ctx)
{
        struct drm_i915_private *dev_priv = engine->i915;
        struct drm_i915_gem_request *req;
        u32 seqno;
        int ret;

        /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
         * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
         * and restart.
         */
        ret = i915_gem_check_wedge(dev_priv);
        if (ret)
                return ERR_PTR(ret);

        /* Move the oldest request to the slab-cache (if not in use!) */
        req = list_first_entry_or_null(&engine->request_list,
                                       typeof(*req), link);
        if (req && i915_gem_request_completed(req))
                i915_gem_request_retire(req);

        /* Beware: Dragons be flying overhead.
         *
         * We use RCU to look up requests in flight. The lookups may
         * race with the request being allocated from the slab freelist.
         * That is, the request we are writing to here may be in the process
         * of being read by __i915_gem_active_get_rcu(). As such,
         * we have to be very careful when overwriting the contents. During
         * the RCU lookup, we chase the request->engine pointer,
         * read the request->fence.seqno and increment the reference count.
         *
         * The reference count is incremented atomically. If it is zero,
         * the lookup knows the request is unallocated and complete. Otherwise,
         * it is either still in use, or has been reallocated and reset
         * with fence_init(). This increment is safe for release as we check
         * that the request we have a reference to matches the active
         * request.
         *
         * Before we increment the refcount, we chase the request->engine
         * pointer. We must not call kmem_cache_zalloc() or else we set
         * that pointer to NULL and cause a crash during the lookup. If
         * we see the request is completed (based on the value of the
         * old engine and seqno), the lookup is complete and reports NULL.
         * If we decide the request is not completed (new engine or seqno),
         * then we grab a reference and double check that it is still the
         * active request - which it won't be, and restart the lookup.
         *
         * Do not use kmem_cache_zalloc() here!
         */
        req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
        if (!req)
                return ERR_PTR(-ENOMEM);

        ret = i915_gem_get_seqno(dev_priv, &seqno);
        if (ret)
                goto err;

        spin_lock_init(&req->lock);
        fence_init(&req->fence,
                   &i915_fence_ops,
                   &req->lock,
                   engine->fence_context,
                   seqno);

        i915_sw_fence_init(&req->submit, submit_notify);

        INIT_LIST_HEAD(&req->active_list);
        req->i915 = dev_priv;
        req->engine = engine;
        req->ctx = i915_gem_context_get(ctx);

        /* No zalloc, must clear what we need by hand */
        req->previous_context = NULL;
        req->file_priv = NULL;
        req->batch = NULL;

        /*
         * Reserve space in the ring buffer for all the commands required to
         * eventually emit this request. This is to guarantee that the
         * i915_add_request() call can't fail. Note that the reserve may need
         * to be redone if the request is not actually submitted straight
         * away, e.g. because a GPU scheduler has deferred it.
         */
        req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;

        if (i915.enable_execlists)
                ret = intel_logical_ring_alloc_request_extras(req);
        else
                ret = intel_ring_alloc_request_extras(req);
        if (ret)
                goto err_ctx;

        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
        req->head = req->ring->tail;

        return req;

err_ctx:
        i915_gem_context_put(ctx);
err:
        kmem_cache_free(dev_priv->requests, req);
        return ERR_PTR(ret);
}

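/* Track engine activity for the first request after idling: take a
 * runtime-pm wakeref, re-enable powersave autotuning and kick off the
 * retire worker (initially scheduled roughly a second from now).
 */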
static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
{
        struct drm_i915_private *dev_priv = engine->i915;

        dev_priv->gt.active_engines |= intel_engine_flag(engine);
        if (dev_priv->gt.awake)
                return;

        intel_runtime_pm_get_noresume(dev_priv);
        dev_priv->gt.awake = true;

        intel_enable_gt_powersave(dev_priv);
        i915_update_gfx_val(dev_priv);
        if (INTEL_GEN(dev_priv) >= 6)
                gen6_rps_busy(dev_priv);

        queue_delayed_work(dev_priv->wq,
                           &dev_priv->gt.retire_work,
                           round_jiffies_up_relative(HZ));
}

/*
 * NB: This function is not allowed to fail. Doing so would mean that the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_ring *ring = request->ring;
        u32 request_start;
        u32 reserved_tail;
        int ret;

        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
        request_start = ring->tail;
        reserved_tail = request->reserved_space;
        request->reserved_space = 0;

        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
         * things up similar to emitting the lazy request. The difference here
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
        if (flush_caches) {
                ret = engine->emit_flush(request, EMIT_FLUSH);

                /* Not allowed to fail! */
                WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
        }

        trace_i915_gem_request_add(request);

        /* Seal the request and mark it as pending execution. Note that
         * we may inspect this state, without holding any locks, during
         * hangcheck. Hence we apply the barrier to ensure that we do not
         * see a more recent value in the hws than we are tracking.
         */
        request->emitted_jiffies = jiffies;
        request->previous_seqno = engine->last_submitted_seqno;
        engine->last_submitted_seqno = request->fence.seqno;
        i915_gem_active_set(&engine->last_request, request);
        list_add_tail(&request->link, &engine->request_list);
        list_add_tail(&request->ring_link, &ring->request_list);

        /* Record the position of the start of the breadcrumb so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the ring's HEAD.
         */
        request->postfix = ring->tail;

        /* Not allowed to fail! */
        ret = engine->emit_request(request);
        WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);

        /* Sanity check that the reserved size was large enough. */
        ret = ring->tail - request_start;
        if (ret < 0)
                ret += ring->size;
        WARN_ONCE(ret > reserved_tail,
                  "Not enough space reserved (%d bytes) "
                  "for adding the request (%d bytes)\n",
                  reserved_tail, ret);

        i915_gem_mark_busy(engine);

        local_bh_disable();
        i915_sw_fence_commit(&request->submit);
        local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}

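/* Wait entries created with DEFINE_WAIT() auto-remove themselves from the
 * queue on wakeup, so after being woken (e.g. by the global reset
 * broadcast) we must requeue ourselves before sleeping again.
 */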
static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (list_empty(&wait->task_list))
                __add_wait_queue(q, wait);
        spin_unlock_irqrestore(&q->lock, flags);
}

static unsigned long local_clock_us(unsigned int *cpu)
{
        unsigned long t;

        /* Cheaply and approximately convert from nanoseconds to microseconds.
         * The result and subsequent calculations are also defined in the same
         * approximate microseconds units. The principal source of timing
         * error here is from the simple truncation.
         *
         * Note that local_clock() is only defined wrt to the current CPU;
         * the comparisons are no longer valid if we switch CPUs. Instead of
         * blocking preemption for the entire busywait, we can detect the CPU
         * switch and use that as indicator of system load and a reason to
         * stop busywaiting, see busywait_stop().
         */
        *cpu = get_cpu();
        t = local_clock() >> 10; /* ns -> approx us (divide by 1024, not 1000) */
        put_cpu();

        return t;
}

static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
        unsigned int this_cpu;

        if (time_after(local_clock_us(&this_cpu), timeout))
                return true;

        return this_cpu != cpu;
}

bool __i915_spin_request(const struct drm_i915_gem_request *req,
                         int state, unsigned long timeout_us)
{
        unsigned int cpu;

        /* When waiting for high frequency requests, e.g. during synchronous
         * rendering split between the CPU and GPU, the finite amount of time
         * required to set up the irq and wait upon it limits the response
         * rate. By busywaiting on the request completion for a short while we
         * can service the high frequency waits as quickly as possible.
         * However, if it is a slow request, we want to sleep as quickly as
         * possible. The tradeoff between waiting and sleeping is roughly the
         * time it takes to sleep on a request, on the order of a microsecond.
         */

        timeout_us += local_clock_us(&cpu);
        do {
                if (i915_gem_request_completed(req))
                        return true;

                if (signal_pending_state(state, current))
                        break;

                if (busywait_stop(timeout_us, cpu))
                        break;

                cpu_relax_lowlatency();
        } while (!need_resched());

        return false;
}

/**
 * i915_wait_request - wait until execution of request has finished
 * @req: the request to wait upon
 * @flags: how to wait
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 * @rps: client to charge for RPS boosting
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the request was found within the allotted time. Else returns
 * the errno with remaining time filled in the timeout argument.
 */
int i915_wait_request(struct drm_i915_gem_request *req,
                      unsigned int flags,
                      s64 *timeout,
                      struct intel_rps_client *rps)
{
        const int state = flags & I915_WAIT_INTERRUPTIBLE ?
                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(reset);
        struct intel_wait wait;
        unsigned long timeout_remain;
        int ret = 0;

        might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
        GEM_BUG_ON(!!lockdep_is_held(&req->i915->drm.struct_mutex) !=
                   !!(flags & I915_WAIT_LOCKED));
#endif

        if (i915_gem_request_completed(req))
                return 0;

        timeout_remain = MAX_SCHEDULE_TIMEOUT;
        if (timeout) {
                if (WARN_ON(*timeout < 0))
                        return -EINVAL;

                if (*timeout == 0)
                        return -ETIME;

                /* Record current time in case interrupted, or wedged */
                timeout_remain = nsecs_to_jiffies_timeout(*timeout);
                *timeout += ktime_get_raw_ns();
        }

        trace_i915_gem_request_wait_begin(req);

        /* This client is about to stall waiting for the GPU. In many cases
         * this is undesirable and limits the throughput of the system, as
         * many clients cannot continue processing user input/output whilst
         * blocked. RPS autotuning may take tens of milliseconds to respond
         * to the GPU load and thus incurs additional latency for the client.
         * We can circumvent that by promoting the GPU frequency to maximum
         * before we wait. This makes the GPU throttle up much more quickly
         * (good for benchmarks and user experience, e.g. window animations),
         * but at a cost of spending more power processing the workload
         * (bad for battery). Not all clients even want their results
         * immediately and for them we should just let the GPU select its own
         * frequency to maximise efficiency. To prevent a single client from
         * forcing the clocks too high for the whole system, we only allow
         * each client to waitboost once in a busy period.
         */
        if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
                gen6_rps_boost(req->i915, rps, req->emitted_jiffies);

        /* Optimistic short spin before touching IRQs */
        if (i915_spin_request(req, state, 5))
                goto complete;

        set_current_state(state);
        if (flags & I915_WAIT_LOCKED)
                add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);

        intel_wait_init(&wait, req->fence.seqno);
        if (intel_engine_add_wait(req->engine, &wait))
                /* In order to check that we haven't missed the interrupt
                 * as we enabled it, we need to kick ourselves to do a
                 * coherent check on the seqno before we sleep.
                 */
                goto wakeup;

        for (;;) {
                if (signal_pending_state(state, current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                timeout_remain = io_schedule_timeout(timeout_remain);
                if (timeout_remain == 0) {
                        ret = -ETIME;
                        break;
                }

                if (intel_wait_complete(&wait))
                        break;

                set_current_state(state);

wakeup:
                /* Carefully check if the request is complete, giving time
                 * for the seqno to be visible following the interrupt.
                 * We also have to check in case we are kicked by the GPU
                 * reset in order to drop the struct_mutex.
                 */
                if (__i915_request_irq_complete(req))
                        break;

                /* If the GPU is hung, and we hold the lock, reset the GPU
                 * and then check for completion. On a full reset, the engine's
                 * HW seqno will be advanced past us and we are complete.
                 * If we do a partial reset, we have to wait for the GPU to
                 * resume and update the breadcrumb.
                 *
                 * If we don't hold the mutex, we can just wait for the worker
                 * to come along and update the breadcrumb (either directly
                 * itself, or indirectly by recovering the GPU).
                 */
                if (flags & I915_WAIT_LOCKED &&
                    i915_reset_in_progress(&req->i915->gpu_error)) {
                        __set_current_state(TASK_RUNNING);
                        i915_reset(req->i915);
                        reset_wait_queue(&req->i915->gpu_error.wait_queue,
                                         &reset);
                        continue;
                }

                /* Only spin if we know the GPU is processing this request */
                if (i915_spin_request(req, state, 2))
                        break;
        }

        intel_engine_remove_wait(req->engine, &wait);
        if (flags & I915_WAIT_LOCKED)
                remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
        __set_current_state(TASK_RUNNING);

complete:
        trace_i915_gem_request_wait_end(req);

        if (timeout) {
                *timeout -= ktime_get_raw_ns();
                if (*timeout < 0)
                        *timeout = 0;

                /*
                 * Apparently ktime isn't accurate enough and occasionally has
                 * a bit of mismatch in the jiffies<->nsecs<->ktime loop. So
                 * patch things up to make the test happy. We allow up to
                 * 1 jiffy.
                 *
                 * This is a regression from the timespec->ktime conversion.
                 */
                if (ret == -ETIME && *timeout < jiffies_to_usecs(1) * 1000)
                        *timeout = 0;
        }

        if (IS_RPS_USER(rps) &&
            req->fence.seqno == req->engine->last_submitted_seqno) {
                /* The GPU is now idle and this client has stalled.
                 * Since no other client has submitted a request in the
                 * meantime, assume that this client is the only one
                 * supplying work to the GPU but is unable to keep that
                 * work supplied because it is waiting. Since the GPU is
                 * then never kept fully busy, RPS autoclocking will
                 * keep the clocks relatively low, causing further delays.
                 * Compensate by giving the synchronous client credit for
                 * a waitboost next time.
                 */
                spin_lock(&req->i915->rps.client_lock);
                list_del_init(&rps->link);
                spin_unlock(&req->i915->rps.client_lock);
        }

        return ret;
}

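/* Retire completed requests on @engine in order; returns true if every
 * request on the engine was completed and retired, i.e. the engine is
 * now idle.
 */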
static bool engine_retire_requests(struct intel_engine_cs *engine)
{
        struct drm_i915_gem_request *request, *next;

        list_for_each_entry_safe(request, next, &engine->request_list, link) {
                if (!i915_gem_request_completed(request))
                        return false;

                i915_gem_request_retire(request);
        }

        return true;
}

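/* Called under struct_mutex (e.g. from the retire worker): prune completed
 * requests on all active engines and, once every engine has drained,
 * schedule the idle worker to park the GPU after a short grace period.
 */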
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        unsigned int tmp;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        if (dev_priv->gt.active_engines == 0)
                return;

        GEM_BUG_ON(!dev_priv->gt.awake);

        for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines, tmp)
                if (engine_retire_requests(engine))
                        dev_priv->gt.active_engines &= ~intel_engine_flag(engine);

        if (dev_priv->gt.active_engines == 0)
                queue_delayed_work(dev_priv->wq,
                                   &dev_priv->gt.idle_work,
                                   msecs_to_jiffies(100));
}