/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * node(root, dir) - neighbour of @root on the all-nodes list.
 *
 * Evaluates to the previous/next (@dir is the literal member name "prev"
 * or "next") nouveau_mm_node on the nl_entry list, or NULL when @root is
 * at the corresponding end of the list.
 *
 * NOTE(review): the expansion references a local variable "mm", so this
 * macro may only be used inside functions with a "struct nouveau_mm *mm"
 * in scope.  The whole expansion is parenthesized so the ternary cannot
 * bind unexpectedly at the use site.
 */
#define node(root, dir) (((root)->nl_entry.dir == &mm->nodes) ? NULL :         \
	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry))
32 nouveau_mm_free(struct nouveau_mm
*mm
, struct nouveau_mm_node
**pthis
)
34 struct nouveau_mm_node
*this = *pthis
;
37 struct nouveau_mm_node
*prev
= node(this, prev
);
38 struct nouveau_mm_node
*next
= node(this, next
);
40 if (prev
&& prev
->type
== 0) {
41 prev
->length
+= this->length
;
42 list_del(&this->nl_entry
);
43 kfree(this); this = prev
;
46 if (next
&& next
->type
== 0) {
47 next
->offset
= this->offset
;
48 next
->length
+= this->length
;
50 list_del(&this->fl_entry
);
51 list_del(&this->nl_entry
);
52 kfree(this); this = NULL
;
55 if (this && this->type
!= 0) {
56 list_for_each_entry(prev
, &mm
->free
, fl_entry
) {
57 if (this->offset
< prev
->offset
)
61 list_add_tail(&this->fl_entry
, &prev
->fl_entry
);
69 static struct nouveau_mm_node
*
70 region_head(struct nouveau_mm
*mm
, struct nouveau_mm_node
*a
, u32 size
)
72 struct nouveau_mm_node
*b
;
74 if (a
->length
== size
)
77 b
= kmalloc(sizeof(*b
), GFP_KERNEL
);
78 if (unlikely(b
== NULL
))
81 b
->offset
= a
->offset
;
86 list_add_tail(&b
->nl_entry
, &a
->nl_entry
);
88 list_add_tail(&b
->fl_entry
, &a
->fl_entry
);
93 nouveau_mm_head(struct nouveau_mm
*mm
, u8 type
, u32 size_max
, u32 size_min
,
94 u32 align
, struct nouveau_mm_node
**pnode
)
96 struct nouveau_mm_node
*prev
, *this, *next
;
103 list_for_each_entry(this, &mm
->free
, fl_entry
) {
104 e
= this->offset
+ this->length
;
107 prev
= node(this, prev
);
108 if (prev
&& prev
->type
!= type
)
109 s
= roundup(s
, mm
->block_size
);
111 next
= node(this, next
);
112 if (next
&& next
->type
!= type
)
113 e
= rounddown(e
, mm
->block_size
);
115 s
= (s
+ mask
) & ~mask
;
117 if (s
> e
|| e
- s
< size_min
)
120 splitoff
= s
- this->offset
;
121 if (splitoff
&& !region_head(mm
, this, splitoff
))
124 this = region_head(mm
, this, min(size_max
, e
- s
));
129 list_del(&this->fl_entry
);
137 static struct nouveau_mm_node
*
138 region_tail(struct nouveau_mm
*mm
, struct nouveau_mm_node
*a
, u32 size
)
140 struct nouveau_mm_node
*b
;
142 if (a
->length
== size
)
145 b
= kmalloc(sizeof(*b
), GFP_KERNEL
);
146 if (unlikely(b
== NULL
))
150 b
->offset
= a
->offset
+ a
->length
;
154 list_add(&b
->nl_entry
, &a
->nl_entry
);
156 list_add(&b
->fl_entry
, &a
->fl_entry
);
161 nouveau_mm_tail(struct nouveau_mm
*mm
, u8 type
, u32 size_max
, u32 size_min
,
162 u32 align
, struct nouveau_mm_node
**pnode
)
164 struct nouveau_mm_node
*prev
, *this, *next
;
165 u32 mask
= align
- 1;
169 list_for_each_entry_reverse(this, &mm
->free
, fl_entry
) {
170 u32 e
= this->offset
+ this->length
;
171 u32 s
= this->offset
;
174 prev
= node(this, prev
);
175 if (prev
&& prev
->type
!= type
)
176 s
= roundup(s
, mm
->block_size
);
178 next
= node(this, next
);
179 if (next
&& next
->type
!= type
) {
180 e
= rounddown(e
, mm
->block_size
);
181 c
= next
->offset
- e
;
184 s
= (s
+ mask
) & ~mask
;
186 if (s
> e
|| a
< size_min
)
189 a
= min(a
, size_max
);
193 if (c
&& !region_tail(mm
, this, c
))
196 this = region_tail(mm
, this, a
);
201 list_del(&this->fl_entry
);
210 nouveau_mm_init(struct nouveau_mm
*mm
, u32 offset
, u32 length
, u32 block
)
212 struct nouveau_mm_node
*node
;
215 INIT_LIST_HEAD(&mm
->nodes
);
216 INIT_LIST_HEAD(&mm
->free
);
217 mm
->block_size
= block
;
221 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
226 node
->offset
= roundup(offset
, mm
->block_size
);
227 node
->length
= rounddown(offset
+ length
, mm
->block_size
);
228 node
->length
-= node
->offset
;
231 list_add_tail(&node
->nl_entry
, &mm
->nodes
);
232 list_add_tail(&node
->fl_entry
, &mm
->free
);
238 nouveau_mm_fini(struct nouveau_mm
*mm
)
240 if (nouveau_mm_initialised(mm
)) {
241 struct nouveau_mm_node
*node
, *heap
=
242 list_first_entry(&mm
->nodes
, typeof(*heap
), nl_entry
);
245 list_for_each_entry(node
, &mm
->nodes
, nl_entry
) {
246 if (WARN_ON(nodes
++ == mm
->heap_nodes
))