/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
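
/*
 * Example usage (an illustrative sketch only; the sizes and error
 * handling here are hypothetical and not part of this file):
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *hole, *node;
 *
 *	if (drm_mm_init(&mm, 0, 4096 * 1024))
 *		return -ENOMEM;
 *
 *	hole = drm_mm_search_free(&mm, 4096, 0, 0);
 *	if (hole != NULL)
 *		node = drm_mm_get_block(hole, 4096, 0);
 *	...
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 *
 * drm_mm_get_block() is the inline wrapper in drm_mm.h around
 * drm_mm_get_block_generic() defined below.
 */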

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

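/*
 * Allocate a new node; if the allocation fails (e.g. under GFP_ATOMIC),
 * fall back to the pool of nodes on mm->unused_nodes that
 * drm_mm_pre_get() keeps topped up.
 */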
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, free_stack);
			list_del(&child->free_stack);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/**
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->free_stack, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
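
/*
 * Callers that must allocate while holding a spinlock can top up the
 * node pool first. A minimal sketch (dev_priv->lock is a hypothetical
 * driver lock, not part of this API):
 *
 *	ret = drm_mm_pre_get(mm);
 *	if (ret)
 *		return ret;
 *
 *	spin_lock(&dev_priv->lock);
 *	node = drm_mm_get_block_atomic(hole, size, alignment);
 *	spin_unlock(&dev_priv->lock);
 *
 * drm_mm_get_block_atomic() is the inline wrapper in drm_mm.h that
 * calls drm_mm_get_block_generic() with atomic == 1.
 */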

static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->node_list, &mm->node_list);
	list_add_tail(&child->free_stack, &mm->free_stack);

	return 0;
}

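/*
 * Split @size bytes off the start of @parent: the child takes over
 * parent->start, and @parent shrinks by @size and moves up by @size.
 * The child is returned marked as allocated, i.e. not on the free stack.
 */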
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->free_stack);

	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->node_list, &parent->node_list);

	parent->size -= size;
	parent->start += size;
	return child;
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	unsigned wasted = 0;

	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = (node->start + wasted) % alignment;

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->free_stack);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->node_list;
	struct list_head *root_head = &mm->node_list;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	BUG_ON(cur->scanned_block || cur->scanned_prev_free
	       || cur->scanned_next_free);

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, node_list);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, node_list);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->node_list);
				list_del(&next_node->free_stack);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->free_stack,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->free_stack, &mm->free_stack);
	} else {
		list_del(&cur->node_list);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->free_stack, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
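
/*
 * Returns 1 if the hole [start, end) can hold @size bytes at the
 * requested @alignment, counting the bytes wasted to advance @start to
 * the next aligned offset; returns 0 otherwise.
 */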
static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	unsigned wasted = 0;

	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			wasted = alignment - tmp;
	}

	if (end >= start + size + wasted)
		return 1;

	return 0;
}

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		if (!check_free_hole(entry->start, entry->start + entry->size,
				     size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->free_stack, free_stack) {
		unsigned long adj_start = entry->start < start ?
			start : entry->start;
		unsigned long adj_end = entry->start + entry->size > end ?
			end : entry->start + entry->size;

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment)
{
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_size = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);
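
/*
 * A sketch of the intended eviction loop (obj, its lru/scan list links
 * and evict() are hypothetical driver code, not part of this API):
 *
 *	LIST_HEAD(scanned);
 *
 *	drm_mm_init_scan(mm, size, alignment);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scanned);
 *		if (drm_mm_scan_add_block(obj->mm_node))
 *			break;
 *	}
 *
 *	list_for_each_entry_safe(obj, tmp, &scanned, scan_link) {
 *		if (drm_mm_scan_remove_block(obj->mm_node))
 *			evict(obj);
 *		list_del(&obj->scan_link);
 *	}
 *
 * Because list_add() prepends, walking &scanned forwards removes the
 * blocks in the reverse of the order they were added, as required by
 * drm_mm_scan_remove_block() below. Blocks for which it returns 1 form
 * the found hole and must be evicted.
 */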

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct list_head *prev_free, *next_free;
	struct drm_mm_node *prev_node, *next_node;

	mm->scanned_blocks++;

	prev_free = next_free = NULL;

	BUG_ON(node->free);
	node->scanned_block = 1;
	node->free = 1;

	if (node->node_list.prev != &mm->node_list) {
		prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
				       node_list);

		if (prev_node->free) {
			list_del(&prev_node->node_list);

			node->start = prev_node->start;
			node->size += prev_node->size;

			prev_node->scanned_prev_free = 1;

			prev_free = &prev_node->free_stack;
		}
	}

	if (node->node_list.next != &mm->node_list) {
		next_node = list_entry(node->node_list.next, struct drm_mm_node,
				       node_list);

		if (next_node->free) {
			list_del(&next_node->node_list);

			node->size += next_node->size;

			next_node->scanned_next_free = 1;

			next_free = &next_node->free_stack;
		}
	}

	/* The free_stack list is not used for allocated objects, so these two
	 * pointers can be abused (as long as no allocations in this memory
	 * manager happen). */
	node->free_stack.prev = prev_free;
	node->free_stack.next = next_free;

	if (check_free_hole(node->start, node->start + node->size,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = node->start;
		mm->scan_hit_size = node->size;

		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed from the scan list in the reverse of the order
 * they have been added, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node, *next_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;
	node->free = 0;

	prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
			       free_stack);
	next_node = list_entry(node->free_stack.next, struct drm_mm_node,
			       free_stack);

	if (prev_node) {
		BUG_ON(!prev_node->scanned_prev_free);
		prev_node->scanned_prev_free = 0;

		list_add_tail(&prev_node->node_list, &node->node_list);

		node->start = prev_node->start + prev_node->size;
		node->size -= prev_node->size;
	}

	if (next_node) {
		BUG_ON(!next_node->scanned_next_free);
		next_node->scanned_next_free = 0;

		list_add(&next_node->node_list, &node->node_list);

		node->size -= next_node->size;
	}

	INIT_LIST_HEAD(&node->free_stack);

	/* Only need to check for containment because start and size of the
	 * complete resulting free block (not just the desired part) are
	 * stored. */
	if (node->start >= mm->scan_hit_start &&
	    node->start + node->size
	    <= mm->scan_hit_start + mm->scan_hit_size) {
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->node_list);
	INIT_LIST_HEAD(&mm->free_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);

void drm_mm_takedown(struct drm_mm *mm)
{
	struct list_head *bnode = mm->free_stack.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, free_stack);

	if (entry->node_list.next != &mm->node_list ||
	    entry->free_stack.next != &mm->free_stack) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->free_stack);
	list_del(&entry->node_list);
	kfree(entry);

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
		list_del(&entry->free_stack);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: %s\n",
		       prefix, entry->start, entry->start + entry->size,
		       entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
	       total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->node_list, node_list) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n",
		   total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif