drm/i915: Split i915_gem_execbuffer into its own file.
drivers/gpu/drm/i915/i915_gem_evict.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"

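/* Add the object to the eviction scan: put it on the caller's unwind list,
 * take a reference so it cannot disappear mid-scan, and hand its GTT block
 * to the drm_mm scanner. Returns true once the scanner has seen enough
 * blocks to satisfy the request.
 */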
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
	list_add(&obj->evict_list, unwind);
	drm_gem_object_reference(&obj->base);
	return drm_mm_scan_add_block(obj->gtt_space);
}

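/*
 * Scan the GTT in LRU order for a hole of at least @min_size bytes at
 * @alignment (restricted to the CPU-visible aperture below
 * dev_priv->mm.gtt_mappable_end when @mappable is set), evicting objects
 * until such a hole exists. Returns 0 on success, -ENOSPC if no suitable
 * set of objects could be found, or the error raised while unbinding.
 */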
int
i915_gem_evict_something(struct drm_device *dev, int min_size,
			 unsigned alignment, bool mappable)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	i915_gem_retire_requests(dev);

	/* Re-check for free space after retiring requests */
	if (mappable) {
		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
						min_size, alignment, 0,
						dev_priv->mm.gtt_mappable_end,
						0))
			return 0;
	} else {
		if (drm_mm_search_free(&dev_priv->mm.gtt_space,
				       min_size, alignment, 0))
			return 0;
	}

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

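	/* Prime the drm_mm scanner for a hole of the requested size and
	 * alignment, optionally confined to the mappable aperture. */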
	INIT_LIST_HEAD(&unwind_list);
	if (mappable)
		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
					    alignment, 0,
					    dev_priv->mm.gtt_mappable_end);
	else
		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		/* Does the object require an outstanding flush? */
		if (obj->base.write_domain || obj->pin_count)
			continue;

		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Finally add anything with a pending flush (in order of retirement) */
	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
		if (obj->pin_count)
			continue;

		if (mark_free(obj, &unwind_list))
			goto found;
	}
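	/* Last resort: the dirty objects on the active lists, which cannot
	 * retire until their pending flush has been emitted. */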
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (!obj->base.write_domain || obj->pin_count)
			continue;

		if (mark_free(obj, &unwind_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	list_for_each_entry(obj, &unwind_list, evict_list) {
		ret = drm_mm_scan_remove_block(obj->gtt_space);
		BUG_ON(ret);
		drm_gem_object_unreference(&obj->base);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while scanning,
	 * so stash the objects to be evicted on a temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       evict_list);
		if (drm_mm_scan_remove_block(obj->gtt_space)) {
			list_move(&obj->evict_list, &eviction_list);
			continue;
		}
		list_del(&obj->evict_list);
		drm_gem_object_unreference(&obj->base);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		obj = list_first_entry(&eviction_list,
				       struct drm_i915_gem_object,
				       evict_list);
		if (ret == 0)
			ret = i915_gem_object_unbind(obj);
		list_del(&obj->evict_list);
		drm_gem_object_unreference(&obj->base);
	}

	return ret;
}

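/*
 * Evict everything: wait for the GPU to idle, which retires all requests and
 * moves every flushed object onto the inactive list, then unbind the inactive
 * objects (all of them, or only the purgeable ones when @purgeable_only is
 * set). Returns -ENOSPC if nothing was bound to begin with.
 */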
int
i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret;
	bool lists_empty;

	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.flushing_list) &&
		       list_empty(&dev_priv->mm.active_list));
	if (lists_empty)
		return -ENOSPC;

	/* Flush everything (on to the inactive lists) and evict */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));

	return i915_gem_evict_inactive(dev, purgeable_only);
}

/** Unbinds all inactive objects; if @purgeable_only is set, only those
 * marked as purgeable (obj->madv != I915_MADV_WILLNEED) are unbound. */
int
i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;

	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list, mm_list) {
		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
			int ret = i915_gem_object_unbind(obj);
			if (ret)
				return ret;
		}
	}

	return 0;
}