/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kzalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kzalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child = list_entry(mm->unused_nodes.next,
					   struct drm_mm_node, node_list);
			list_del(&child->node_list);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

/*
 * drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * @mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->node_list, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
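
/*
 * Usage sketch (illustrative, not part of the original file): a driver that
 * must carve out blocks while holding a spinlock can top up the unused-node
 * cache beforehand, from a context that may still sleep, and then pass
 * atomic = 1 to the get_block call so it falls back to that cache instead
 * of sleeping. "dev_priv" and its lock are hypothetical driver-side names.
 *
 *	if (drm_mm_pre_get(&dev_priv->mm))
 *		return -ENOMEM;
 *	spin_lock(&dev_priv->mm_lock);
 *	node = drm_mm_get_block_generic(hole, size, alignment, 0, 1);
 *	spin_unlock(&dev_priv->mm_lock);
 */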

static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	struct drm_mm_node *next_node =
		list_entry(hole_node->node_list.next, struct drm_mm_node,
			   node_list);

	return next_node->start;
}

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 unsigned long size, unsigned alignment,
				 unsigned long color)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
					     unsigned long size,
					     unsigned alignment,
					     unsigned long color,
					     int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       unsigned long size, unsigned alignment,
			       unsigned long color)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
		       unsigned long size, unsigned alignment)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
}
EXPORT_SYMBOL(drm_mm_insert_node);
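
/*
 * Usage sketch (illustrative, not part of the original file): with the
 * embedded-node API the caller owns the struct drm_mm_node storage,
 * typically inside its own object, so no allocation happens on insert.
 * The node must be zeroed beforehand. "my_obj" is a hypothetical
 * caller-side structure.
 *
 *	struct my_obj {
 *		struct drm_mm_node node;
 *	};
 *
 *	memset(&obj->node, 0, sizeof(obj->node));
 *	ret = drm_mm_insert_node(&mm, &obj->node, size, alignment);
 *	if (ret == -ENOSPC)
 *		(evict something, then retry)
 */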

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long color,
				       unsigned long start, unsigned long end)
{
	struct drm_mm *mm = hole_node->mm;
	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
	unsigned long adj_start = hole_start;
	unsigned long adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (alignment) {
		unsigned tmp = adj_start % alignment;
		if (tmp)
			adj_start += alignment - tmp;
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (node->start + node->size < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
						   unsigned long size,
						   unsigned alignment,
						   unsigned long color,
						   unsigned long start,
						   unsigned long end,
						   int atomic)
{
	struct drm_mm_node *node;

	node = drm_mm_kmalloc(hole_node->mm, atomic);
	if (unlikely(node == NULL))
		return NULL;

	drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
				   start, end);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range
 * restricted allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					unsigned long size, unsigned alignment, unsigned long color,
					unsigned long start, unsigned long end)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, 0);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
				unsigned long size, unsigned alignment,
				unsigned long start, unsigned long end)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
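
/*
 * Usage sketch (illustrative, not part of the original file): the
 * range-restricted variants confine an allocation to [start, end), e.g. to
 * keep a buffer within the CPU-mappable portion of VRAM. "mappable_end" is
 * a hypothetical driver-side limit.
 *
 *	ret = drm_mm_insert_node_in_range(&mm, &obj->node, size, alignment,
 *					  0, mappable_end);
 */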

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	BUG_ON(node->scanned_block || node->scanned_prev_free
	       || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(drm_mm_hole_node_start(node)
		       == drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(drm_mm_hole_node_start(node)
		       != drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	drm_mm_remove_node(node);

	spin_lock(&mm->unused_lock);
	if (mm->num_unused < MM_UNUSED_TARGET) {
		list_add(&node->node_list, &mm->unused_nodes);
		++mm->num_unused;
	} else
		kfree(node);
	spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
			   unsigned long size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		unsigned tmp = start % alignment;
		if (tmp)
			start += alignment - tmp;
	}

	return end >= start + size;
}
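
/*
 * Worked example of the alignment handling above (illustrative): with
 * start = 0x1010, end = 0x3000, size = 0x1000 and alignment = 0x1000,
 * tmp = 0x1010 % 0x1000 = 0x10, so start is bumped by 0xff0 to 0x2000,
 * and the hole still fits since end (0x3000) >= start + size (0x3000).
 */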

struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
					       unsigned long size,
					       unsigned alignment,
					       unsigned long color,
					       bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		BUG_ON(!entry->hole_follows);
		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_generic);

struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							unsigned long size,
							unsigned alignment,
							unsigned long color,
							unsigned long start,
							unsigned long end,
							bool best_match)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
		unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
			start : drm_mm_hole_node_start(entry);
		unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
			end : drm_mm_hole_node_end(entry);

		BUG_ON(!entry->hole_follows);

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!best_match)
			return entry;

		if (entry->size < best_size) {
			best = entry;
			best_size = entry->size;
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      unsigned long size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize LRU scanning.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 unsigned long size,
				 unsigned alignment,
				 unsigned long color,
				 unsigned long start,
				 unsigned long end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns non-zero, if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	unsigned long hole_start, hole_end;
	unsigned long adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then return
 * the just freed block (because it's at the top of the free_stack list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
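
/*
 * Usage sketch (illustrative, not part of the original file): eviction
 * follows a two-phase pattern. Scan-add objects in LRU order until a
 * suitable hole is reported, then scan-remove every added object in the
 * exact reverse order, evicting those for which drm_mm_scan_remove_block()
 * returns 1. Prepending to "scan_list" makes a forward walk visit nodes in
 * reverse order of addition. "lru", "scan_list", "evict_list" and the
 * per-object links are hypothetical caller-side bookkeeping.
 *
 *	drm_mm_init_scan(&mm, size, alignment, color);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&obj->node))
 *			break;			(hole found)
 *	}
 *
 *	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *		if (drm_mm_scan_remove_block(&obj->node))
 *			list_move(&obj->evict_link, &evict_list);
 *		list_del(&obj->scan_link);
 *	}
 */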

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	mm->scanned_blocks = 0;
	spin_lock_init(&mm->unused_lock);

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;

	return 0;
}
EXPORT_SYMBOL(drm_mm_init);
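
/*
 * Usage sketch (illustrative, not part of the original file): a manager is
 * initialized once against the range it arbitrates, here a hypothetical
 * VRAM aperture managed in pages, and torn down only after all nodes have
 * been removed.
 *
 *	drm_mm_init(&mm, 0, vram_size >> PAGE_SHIFT);
 *	(insert and remove nodes)
 *	drm_mm_takedown(&mm);
 */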

void drm_mm_takedown(struct drm_mm *mm)
{
	struct drm_mm_node *entry, *next;

	if (!list_empty(&mm->head_node.node_list)) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
		list_del(&entry->node_list);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
		       prefix, hole_start, hole_end,
		       hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
		       prefix, entry->start, entry->start + entry->size,
		       entry->size);
		total_used += entry->size;

		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
			       prefix, hole_start, hole_end,
			       hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
	       total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;
	unsigned long hole_start, hole_end, hole_size;

	hole_start = drm_mm_hole_node_start(&mm->head_node);
	hole_end = drm_mm_hole_node_end(&mm->head_node);
	hole_size = hole_end - hole_start;
	if (hole_size)
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
			   hole_start, hole_end, hole_size);
	total_free += hole_size;

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
			   entry->start, entry->start + entry->size,
			   entry->size);
		total_used += entry->size;
		if (entry->hole_follows) {
			hole_start = drm_mm_hole_node_start(entry);
			hole_end = drm_mm_hole_node_end(entry);
			hole_size = hole_end - hole_start;
			seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
				   hole_start, hole_end, hole_size);
			total_free += hole_size;
		}
	}
	total = total_free + total_used;

	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif