/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* Neighbour of @root on the address-ordered node list, or NULL at either
 * end.  NOTE: expands a bare reference to a local variable `mm` — it may
 * only be used inside a function that has `struct nouveau_mm *mm` in scope.
 * @dir is the list_head member to follow: `prev` or `next`. */
#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
32 nouveau_mm_dump(struct nouveau_mm
*mm
, const char *header
)
34 struct nouveau_mm_node
*node
;
36 printk(KERN_ERR
"nouveau: %s\n", header
);
37 printk(KERN_ERR
"nouveau: node list:\n");
38 list_for_each_entry(node
, &mm
->nodes
, nl_entry
) {
39 printk(KERN_ERR
"nouveau: \t%08x %08x %d\n",
40 node
->offset
, node
->length
, node
->type
);
42 printk(KERN_ERR
"nouveau: free list:\n");
43 list_for_each_entry(node
, &mm
->free
, fl_entry
) {
44 printk(KERN_ERR
"nouveau: \t%08x %08x %d\n",
45 node
->offset
, node
->length
, node
->type
);
50 nouveau_mm_free(struct nouveau_mm
*mm
, struct nouveau_mm_node
**pthis
)
52 struct nouveau_mm_node
*this = *pthis
;
55 struct nouveau_mm_node
*prev
= node(this, prev
);
56 struct nouveau_mm_node
*next
= node(this, next
);
58 if (prev
&& prev
->type
== NVKM_MM_TYPE_NONE
) {
59 prev
->length
+= this->length
;
60 list_del(&this->nl_entry
);
61 kfree(this); this = prev
;
64 if (next
&& next
->type
== NVKM_MM_TYPE_NONE
) {
65 next
->offset
= this->offset
;
66 next
->length
+= this->length
;
67 if (this->type
== NVKM_MM_TYPE_NONE
)
68 list_del(&this->fl_entry
);
69 list_del(&this->nl_entry
);
70 kfree(this); this = NULL
;
73 if (this && this->type
!= NVKM_MM_TYPE_NONE
) {
74 list_for_each_entry(prev
, &mm
->free
, fl_entry
) {
75 if (this->offset
< prev
->offset
)
79 list_add_tail(&this->fl_entry
, &prev
->fl_entry
);
80 this->type
= NVKM_MM_TYPE_NONE
;
87 static struct nouveau_mm_node
*
88 region_head(struct nouveau_mm
*mm
, struct nouveau_mm_node
*a
, u32 size
)
90 struct nouveau_mm_node
*b
;
92 if (a
->length
== size
)
95 b
= kmalloc(sizeof(*b
), GFP_KERNEL
);
96 if (unlikely(b
== NULL
))
99 b
->offset
= a
->offset
;
104 list_add_tail(&b
->nl_entry
, &a
->nl_entry
);
105 if (b
->type
== NVKM_MM_TYPE_NONE
)
106 list_add_tail(&b
->fl_entry
, &a
->fl_entry
);
111 nouveau_mm_head(struct nouveau_mm
*mm
, u8 type
, u32 size_max
, u32 size_min
,
112 u32 align
, struct nouveau_mm_node
**pnode
)
114 struct nouveau_mm_node
*prev
, *this, *next
;
115 u32 mask
= align
- 1;
119 BUG_ON(type
== NVKM_MM_TYPE_NONE
|| type
== NVKM_MM_TYPE_HOLE
);
121 list_for_each_entry(this, &mm
->free
, fl_entry
) {
122 e
= this->offset
+ this->length
;
125 prev
= node(this, prev
);
126 if (prev
&& prev
->type
!= type
)
127 s
= roundup(s
, mm
->block_size
);
129 next
= node(this, next
);
130 if (next
&& next
->type
!= type
)
131 e
= rounddown(e
, mm
->block_size
);
133 s
= (s
+ mask
) & ~mask
;
135 if (s
> e
|| e
- s
< size_min
)
138 splitoff
= s
- this->offset
;
139 if (splitoff
&& !region_head(mm
, this, splitoff
))
142 this = region_head(mm
, this, min(size_max
, e
- s
));
147 list_del(&this->fl_entry
);
155 static struct nouveau_mm_node
*
156 region_tail(struct nouveau_mm
*mm
, struct nouveau_mm_node
*a
, u32 size
)
158 struct nouveau_mm_node
*b
;
160 if (a
->length
== size
)
163 b
= kmalloc(sizeof(*b
), GFP_KERNEL
);
164 if (unlikely(b
== NULL
))
168 b
->offset
= a
->offset
+ a
->length
;
172 list_add(&b
->nl_entry
, &a
->nl_entry
);
173 if (b
->type
== NVKM_MM_TYPE_NONE
)
174 list_add(&b
->fl_entry
, &a
->fl_entry
);
179 nouveau_mm_tail(struct nouveau_mm
*mm
, u8 type
, u32 size_max
, u32 size_min
,
180 u32 align
, struct nouveau_mm_node
**pnode
)
182 struct nouveau_mm_node
*prev
, *this, *next
;
183 u32 mask
= align
- 1;
185 BUG_ON(type
== NVKM_MM_TYPE_NONE
|| type
== NVKM_MM_TYPE_HOLE
);
187 list_for_each_entry_reverse(this, &mm
->free
, fl_entry
) {
188 u32 e
= this->offset
+ this->length
;
189 u32 s
= this->offset
;
192 prev
= node(this, prev
);
193 if (prev
&& prev
->type
!= type
)
194 s
= roundup(s
, mm
->block_size
);
196 next
= node(this, next
);
197 if (next
&& next
->type
!= type
) {
198 e
= rounddown(e
, mm
->block_size
);
199 c
= next
->offset
- e
;
202 s
= (s
+ mask
) & ~mask
;
204 if (s
> e
|| a
< size_min
)
207 a
= min(a
, size_max
);
211 if (c
&& !region_tail(mm
, this, c
))
214 this = region_tail(mm
, this, a
);
219 list_del(&this->fl_entry
);
228 nouveau_mm_init(struct nouveau_mm
*mm
, u32 offset
, u32 length
, u32 block
)
230 struct nouveau_mm_node
*node
, *prev
;
233 if (nouveau_mm_initialised(mm
)) {
234 prev
= list_last_entry(&mm
->nodes
, typeof(*node
), nl_entry
);
235 next
= prev
->offset
+ prev
->length
;
236 if (next
!= offset
) {
237 BUG_ON(next
> offset
);
238 if (!(node
= kzalloc(sizeof(*node
), GFP_KERNEL
)))
240 node
->type
= NVKM_MM_TYPE_HOLE
;
242 node
->length
= offset
- next
;
243 list_add_tail(&node
->nl_entry
, &mm
->nodes
);
245 BUG_ON(block
!= mm
->block_size
);
247 INIT_LIST_HEAD(&mm
->nodes
);
248 INIT_LIST_HEAD(&mm
->free
);
249 mm
->block_size
= block
;
253 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
258 node
->offset
= roundup(offset
, mm
->block_size
);
259 node
->length
= rounddown(offset
+ length
, mm
->block_size
);
260 node
->length
-= node
->offset
;
263 list_add_tail(&node
->nl_entry
, &mm
->nodes
);
264 list_add_tail(&node
->fl_entry
, &mm
->free
);
270 nouveau_mm_fini(struct nouveau_mm
*mm
)
272 struct nouveau_mm_node
*node
, *temp
;
275 if (!nouveau_mm_initialised(mm
))
278 list_for_each_entry(node
, &mm
->nodes
, nl_entry
) {
279 if (node
->type
!= NVKM_MM_TYPE_HOLE
) {
280 if (++nodes
> mm
->heap_nodes
) {
281 nouveau_mm_dump(mm
, "mm not clean!");
287 list_for_each_entry_safe(node
, temp
, &mm
->nodes
, nl_entry
) {
288 list_del(&node
->nl_entry
);