/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"
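
/*
 * Editor's note (assumption, not original kernel commentary): in legacy
 * ringbuffer mode a context stays pinned in the GGTT for as long as any
 * engine's last_context still points at it, so before context objects can
 * be evicted every busy engine must first be switched over to the
 * always-pinned kernel context. Under execlists, contexts are pinned only
 * while requests are in flight, hence the early return below.
 */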
static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	if (i915.enable_execlists)
		return 0;

	for_each_engine(engine, dev_priv) {
		struct drm_i915_gem_request *req;
		int ret;

		if (engine->last_context == NULL)
			continue;

		if (engine->last_context == dev_priv->kernel_context)
			continue;

		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
		if (IS_ERR(req))
			return PTR_ERR(req);

		ret = i915_switch_context(req);
		i915_add_request_no_flush(req);
		if (ret)
			return ret;
	}

	return 0;
}
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->pin_count)
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}
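
/*
 * Illustrative sketch (not part of the driver): the drm_mm scan protocol
 * that mark_free() participates in. Every block handed to
 * drm_mm_scan_add_block() must be handed back via drm_mm_scan_remove_block()
 * before any other drm_mm operation, and removal must happen in reverse
 * order of addition — which mark_free() guarantees by prepending to the
 * unwind list. A minimal roll-back, with all names other than the drm_mm_*
 * and list helpers hypothetical:
 *
 *	drm_mm_init_scan(&vm->mm, size, alignment, cache_level);
 *	list_for_each_entry(vma, &vm->inactive_list, vm_link)
 *		if (mark_free(vma, &unwind))
 *			break;		// scan found a suitable hole
 *
 *	// Roll back: head-first iteration is reverse order of addition.
 *	list_for_each_entry_safe(vma, next, &unwind, exec_list) {
 *		drm_mm_scan_remove_block(&vma->node);
 *		list_del_init(&vma->exec_list);
 *	}
 */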
/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @dev: drm_device
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space it only
 * ignores pinned vmas, and not objects where the backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 unsigned long start, unsigned long end,
			 unsigned flags)
{
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;
	int pass = 0;

	trace_i915_gem_evict(dev, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *   1. Inactive objects (already retired)
	 *   2. Clean active objects
	 *   3. Flushing list
	 *   4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level,
					    start, end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

search_again:
	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, vm_link) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

	if (flags & PIN_NONBLOCK)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, vm_link) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contents,
	 * or pending flips?
	 */
	if (flags & PIN_NONBLOCK)
		return -ENOSPC;

	/* Only idle the GPU and repeat the search once */
	if (pass++ == 0) {
		struct drm_i915_private *dev_priv = to_i915(dev);

		if (i915_is_ggtt(vm)) {
			ret = switch_to_pinned_context(dev_priv);
			if (ret)
				return ret;
		}

		ret = i915_gem_wait_for_idle(dev_priv);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev_priv);
		goto search_again;
	}

	/* If we still have pending pageflip completions, drop
	 * back to userspace to give our workqueues time to
	 * acquire our locks and unpin the old scanouts.
	 */
	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to be evicted objects on a
	 * temporary list.
	 */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		struct drm_gem_object *obj;

		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		obj = &vma->obj->base;
		list_del_init(&vma->exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		drm_gem_object_unreference(obj);
	}

	return ret;
}
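
/*
 * Illustrative sketch (not part of the driver): the calling pattern the
 * kernel-doc above requires — try to find a hole first and only fall back
 * to eviction on failure, retrying the insertion afterwards. This follows
 * the shape of the binding code in i915_gem.c, but the snippet itself is a
 * hypothetical simplification:
 *
 *	search_free:
 *	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 *						  size, alignment,
 *						  cache_level, start, end,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	if (ret) {
 *		ret = i915_gem_evict_something(dev, vm, size, alignment,
 *					       cache_level, start, end,
 *					       flags);
 *		if (ret == 0)
 *			goto search_free;
 *		return ret;
 *	}
 */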
int
i915_gem_evict_for_vma(struct i915_vma *target)
{
	struct drm_mm_node *node, *next;

	list_for_each_entry_safe(node, next,
			&target->vm->mm.head_node.node_list,
			node_list) {
		struct i915_vma *vma;
		int ret;

		if (node->start + node->size <= target->node.start)
			continue;
		if (node->start >= target->node.start + target->node.size)
			break;

		vma = container_of(node, typeof(*vma), node);

		if (vma->pin_count) {
			if (!vma->exec_entry || (vma->pin_count > 1))
				/* Object is pinned for some other use */
				return -EBUSY;

			/* We need to evict a buffer in the same batch */
			if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
				/* Overlapping fixed objects in the same batch */
				return -EINVAL;

			return -ENOSPC;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;
	}

	return 0;
}
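
/*
 * Illustrative note (not part of the driver): the walk above relies on the
 * drm_mm node list being sorted by address. For a target occupying
 * [0x10000, 0x14000), a node ending at or below 0x10000 is skipped
 * ("continue"), a node starting at or above 0x14000 terminates the walk
 * ("break"), and anything in between overlaps the target and must be
 * unbound. The addresses are made up for illustration.
 */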
/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle needs to be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	WARN_ON(!mutex_is_locked(&vm->dev->struct_mutex));
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		struct drm_i915_private *dev_priv = to_i915(vm->dev);

		if (i915_is_ggtt(vm)) {
			ret = switch_to_pinned_context(dev_priv);
			if (ret)
				return ret;
		}

		ret = i915_gem_wait_for_idle(dev_priv);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev_priv);
		WARN_ON(!list_empty(&vm->active_list));
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
		if (vma->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}