drm/nouveau/core/mm: fill in holes with "allocated" nodes
[deliverable/linux.git] / drivers / gpu / drm / nouveau / core / core / mm.c
1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25 #include "core/os.h"
26 #include "core/mm.h"
27
/* Fetch the neighbour of @root in the address-ordered node list, following
 * link direction @dir (prev or next), or NULL when the neighbour is the
 * list head itself (i.e. @root is the first/last node).
 * NOTE(review): this macro implicitly references a local variable named
 * 'mm' at the expansion site — callers must have 'struct nouveau_mm *mm'
 * in scope.
 */
#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
30
31 static void
32 nouveau_mm_dump(struct nouveau_mm *mm, const char *header)
33 {
34 struct nouveau_mm_node *node;
35
36 printk(KERN_ERR "nouveau: %s\n", header);
37 printk(KERN_ERR "nouveau: node list:\n");
38 list_for_each_entry(node, &mm->nodes, nl_entry) {
39 printk(KERN_ERR "nouveau: \t%08x %08x %d\n",
40 node->offset, node->length, node->type);
41 }
42 printk(KERN_ERR "nouveau: free list:\n");
43 list_for_each_entry(node, &mm->free, fl_entry) {
44 printk(KERN_ERR "nouveau: \t%08x %08x %d\n",
45 node->offset, node->length, node->type);
46 }
47 }
48
/* Return the region *@pthis to allocator @mm and clear the caller's
 * pointer.  Freed space is coalesced with an adjacent free (TYPE_NONE)
 * neighbour on either side; if no merge fully absorbed it, the node is
 * retyped to TYPE_NONE and inserted into the free list in address order.
 * NULL *@pthis is a no-op.  Ownership of the node transfers to @mm (it
 * may be kfree'd here).
 */
void
nouveau_mm_free(struct nouveau_mm *mm, struct nouveau_mm_node **pthis)
{
	struct nouveau_mm_node *this = *pthis;

	if (this) {
		struct nouveau_mm_node *prev = node(this, prev);
		struct nouveau_mm_node *next = node(this, next);

		/* Merge backwards: grow the free predecessor over this
		 * region, drop this node, and continue with 'prev' standing
		 * in for the (now larger) region being freed.
		 */
		if (prev && prev->type == NVKM_MM_TYPE_NONE) {
			prev->length += this->length;
			list_del(&this->nl_entry);
			kfree(this); this = prev;
		}

		/* Merge forwards: extend the free successor downwards to
		 * cover this region.  If 'this' is itself already on the
		 * free list (because the backward merge above replaced it
		 * with a free node), unlink it from there too before
		 * freeing.  this = NULL marks the region fully absorbed.
		 */
		if (next && next->type == NVKM_MM_TYPE_NONE) {
			next->offset = this->offset;
			next->length += this->length;
			if (this->type == NVKM_MM_TYPE_NONE)
				list_del(&this->fl_entry);
			list_del(&this->nl_entry);
			kfree(this); this = NULL;
		}

		/* No merge absorbed the region: insert it into the free
		 * list, kept sorted by offset.  If the loop runs off the
		 * end, 'prev' is the head sentinel and list_add_tail
		 * appends at the very end — the standard kernel idiom.
		 */
		if (this && this->type != NVKM_MM_TYPE_NONE) {
			list_for_each_entry(prev, &mm->free, fl_entry) {
				if (this->offset < prev->offset)
					break;
			}

			list_add_tail(&this->fl_entry, &prev->fl_entry);
			this->type = NVKM_MM_TYPE_NONE;
		}
	}

	*pthis = NULL;
}
86
87 static struct nouveau_mm_node *
88 region_head(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
89 {
90 struct nouveau_mm_node *b;
91
92 if (a->length == size)
93 return a;
94
95 b = kmalloc(sizeof(*b), GFP_KERNEL);
96 if (unlikely(b == NULL))
97 return NULL;
98
99 b->offset = a->offset;
100 b->length = size;
101 b->type = a->type;
102 a->offset += size;
103 a->length -= size;
104 list_add_tail(&b->nl_entry, &a->nl_entry);
105 if (b->type == NVKM_MM_TYPE_NONE)
106 list_add_tail(&b->fl_entry, &a->fl_entry);
107 return b;
108 }
109
/* Allocate from the lowest suitable address in @mm.  Scans the free list
 * front-to-back for a region that can hold at least @size_min units of
 * type @type at alignment @align (must be a power of two — used as a
 * mask), taking up to @size_max.  On success *@pnode receives the new
 * region (retyped to @type, removed from the free list) and 0 is
 * returned; -ENOMEM on node allocation failure, -ENOSPC when no region
 * fits.  Sizes and offsets are in mm->block_size-granular units as
 * established by nouveau_mm_init.
 */
int
nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
		u32 align, struct nouveau_mm_node **pnode)
{
	struct nouveau_mm_node *prev, *this, *next;
	u32 mask = align - 1;
	u32 splitoff;
	u32 s, e;

	/* Callers must request a real allocation type; NONE/HOLE are
	 * reserved markers for free space and heap gaps.
	 */
	BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);

	list_for_each_entry(this, &mm->free, fl_entry) {
		e = this->offset + this->length;
		s = this->offset;

		/* Shrink the usable window to block_size boundaries at any
		 * edge shared with a differently-typed neighbour, so unlike
		 * types never share a block.
		 */
		prev = node(this, prev);
		if (prev && prev->type != type)
			s = roundup(s, mm->block_size);

		next = node(this, next);
		if (next && next->type != type)
			e = rounddown(e, mm->block_size);

		/* Apply the caller's alignment; s > e guards the unsigned
		 * subtraction below against wrap when rounding crossed over.
		 */
		s  = (s + mask) & ~mask;
		e &= ~mask;
		if (s > e || e - s < size_min)
			continue;

		/* Carve off (and leave free) the unaligned head, then take
		 * up to size_max from what remains.
		 */
		splitoff = s - this->offset;
		if (splitoff && !region_head(mm, this, splitoff))
			return -ENOMEM;

		this = region_head(mm, this, min(size_max, e - s));
		if (!this)
			return -ENOMEM;

		this->type = type;
		list_del(&this->fl_entry);
		*pnode = this;
		return 0;
	}

	return -ENOSPC;
}
154
155 static struct nouveau_mm_node *
156 region_tail(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
157 {
158 struct nouveau_mm_node *b;
159
160 if (a->length == size)
161 return a;
162
163 b = kmalloc(sizeof(*b), GFP_KERNEL);
164 if (unlikely(b == NULL))
165 return NULL;
166
167 a->length -= size;
168 b->offset = a->offset + a->length;
169 b->length = size;
170 b->type = a->type;
171
172 list_add(&b->nl_entry, &a->nl_entry);
173 if (b->type == NVKM_MM_TYPE_NONE)
174 list_add(&b->fl_entry, &a->fl_entry);
175 return b;
176 }
177
/* Allocate from the highest suitable address in @mm.  Mirror image of
 * nouveau_mm_head: scans the free list back-to-front for a region that
 * can hold at least @size_min units of type @type at alignment @align
 * (power of two), taking up to @size_max from its high end.  On success
 * *@pnode receives the region and 0 is returned; -ENOMEM on node
 * allocation failure, -ENOSPC when nothing fits.
 */
int
nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
		u32 align, struct nouveau_mm_node **pnode)
{
	struct nouveau_mm_node *prev, *this, *next;
	u32 mask = align - 1;

	BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE);

	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
		u32 e = this->offset + this->length;
		u32 s = this->offset;
		/* c accumulates how much of this region's tail must be
		 * given back (left free) above the allocation; a is the
		 * candidate allocation size.
		 */
		u32 c = 0, a;

		/* Trim the usable window to block_size boundaries at edges
		 * shared with differently-typed neighbours (see
		 * nouveau_mm_head).  The amount trimmed from the top
		 * (next->offset - e) seeds the give-back tail c.
		 */
		prev = node(this, prev);
		if (prev && prev->type != type)
			s = roundup(s, mm->block_size);

		next = node(this, next);
		if (next && next->type != type) {
			e = rounddown(e, mm->block_size);
			c = next->offset - e;
		}

		s = (s + mask) & ~mask;
		a = e - s;
		if (s > e || a < size_min)
			continue;

		/* Clamp to size_max, align the start of the allocation
		 * downwards, and add any extra space between the aligned
		 * start and the old top to the give-back tail.
		 */
		a  = min(a, size_max);
		s  = (e - a) & ~mask;
		c += (e - s) - a;

		/* Split off the free tail above the allocation first, then
		 * carve the allocation itself from what is now the region's
		 * new tail.
		 */
		if (c && !region_tail(mm, this, c))
			return -ENOMEM;

		this = region_tail(mm, this, a);
		if (!this)
			return -ENOMEM;

		this->type = type;
		list_del(&this->fl_entry);
		*pnode = this;
		return 0;
	}

	return -ENOSPC;
}
226
/* Initialise @mm over [@offset, @offset + @length) with granularity
 * @block, or — when @mm is already initialised — append a second heap
 * range above the existing one.  When appending, any gap between the
 * current top and @offset is filled with a TYPE_HOLE node so the node
 * list stays contiguous (holes are never allocated from and are skipped
 * by the leak check in nouveau_mm_fini).  BUG_ON if the new range
 * overlaps the existing heap or @block differs from the established
 * block size.  Returns 0, or -ENOMEM.
 * NOTE(review): units of offset/length relative to block appear to be
 * raw units rounded to block multiples below — confirm against callers.
 */
int
nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
{
	struct nouveau_mm_node *node, *prev;
	u32 next;

	if (nouveau_mm_initialised(mm)) {
		/* Growing an existing mm: find the current top-of-heap and
		 * plug any gap up to the new range with a HOLE node.
		 */
		prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry);
		next = prev->offset + prev->length;
		if (next != offset) {
			BUG_ON(next > offset);
			if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
				return -ENOMEM;
			node->type   = NVKM_MM_TYPE_HOLE;
			node->offset = next;
			node->length = offset - next;
			list_add_tail(&node->nl_entry, &mm->nodes);
		}
		BUG_ON(block != mm->block_size);
	} else {
		INIT_LIST_HEAD(&mm->nodes);
		INIT_LIST_HEAD(&mm->free);
		mm->block_size = block;
		mm->heap_nodes = 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	/* Round the range inwards to block_size multiples; kzalloc already
	 * zeroed offset/length, so length == 0 yields an empty node.
	 */
	if (length) {
		node->offset  = roundup(offset, mm->block_size);
		node->length  = rounddown(offset + length, mm->block_size);
		node->length -= node->offset;
	}

	list_add_tail(&node->nl_entry, &mm->nodes);
	list_add_tail(&node->fl_entry, &mm->free);
	mm->heap_nodes++;
	return 0;
}
268
269 int
270 nouveau_mm_fini(struct nouveau_mm *mm)
271 {
272 struct nouveau_mm_node *node, *temp;
273 int nodes = 0;
274
275 if (!nouveau_mm_initialised(mm))
276 return 0;
277
278 list_for_each_entry(node, &mm->nodes, nl_entry) {
279 if (node->type != NVKM_MM_TYPE_HOLE) {
280 if (++nodes > mm->heap_nodes) {
281 nouveau_mm_dump(mm, "mm not clean!");
282 return -EBUSY;
283 }
284 }
285 }
286
287 list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) {
288 list_del(&node->nl_entry);
289 kfree(node);
290 }
291 mm->heap_nodes = 0;
292 return 0;
293 }
This page took 0.037577 seconds and 5 git commands to generate.