/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, however typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heap pages are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from the heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a 4-byte header recording the kmalloc
 * size. If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
 * alloc_pages() directly, allocating compound pages so the page order
 * does not have to be separately tracked, and also stores the exact
 * allocation size in page->private so that it can be used to accurately
 * provide ksize(). These objects are detected in kfree() because
 * slob_page() is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors and
 * destructors for every SLAB allocation. Objects are returned with
 * 4-byte alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which
 * case the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead, and compound pages aren't needed for multi-page
 * allocations.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator and simply doing the node accounting
 * at the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_exact_node() with the specified node id is used
 * instead. The common case (or when the node id isn't explicitly provided)
 * will default to the current node, as per numa_node_id().
 *
 * Node-aware pages are still inserted into the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done on pages residing on the same node,
 * in order to prevent random node placement.
 */
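/*
 * Illustrative sketch (added here for orientation, not part of the original
 * design notes): the kmalloc layer described above prepends a size header
 * at an aligned offset, so a small allocation roughly looks like this,
 * assuming a 4-byte minimum alignment:
 *
 *	unsigned int *m = slob_alloc(size + 4, gfp, 4, node);
 *	*m = size;			// record the requested size
 *	return (void *)m + 4;		// caller sees the aligned payload
 *
 * kfree() then steps back by the same alignment to recover the header,
 * and ksize() reads it to report the usable size.
 */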
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <linux/atomic.h>
/*
 * slob_block has a field 'units', which indicates the size of a block if +ve,
 * or the offset of the next block if -ve (in SLOB_UNITs).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size in the first SLOB_UNIT of
 * memory, and the offset of the next free block in the second SLOB_UNIT.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;
/*
 * free_slob_page: call before a slob page is returned to the page allocator.
 */
static inline void free_slob_page(struct page *sp)
{
	reset_page_mapcount(sp);
	sp->mapping = NULL;
}
/*
 * All partially free slob pages go on these lists.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);
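/*
 * For illustration (derived from the two break points above): a 100-byte
 * request is served from free_slob_small, a 512-byte request from
 * free_slob_medium, and a 2000-byte request from free_slob_large, since
 * the lists split at SLOB_BREAK1 (256) and SLOB_BREAK2 (1024) bytes.
 */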
/*
 * is_slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int is_slob_page(struct page *sp)
{
	return PageSlab(sp);
}

static inline void set_slob_page(struct page *sp)
{
	__SetPageSlab(sp);
}

static inline void clear_slob_page(struct page *sp)
{
	__ClearPageSlab(sp);
}

static inline struct page *slob_page(const void *addr)
{
	return virt_to_page(addr);
}
/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct page *sp)
{
	return PageSlobFree(sp);
}

static void set_slob_page_free(struct page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree(sp);
}

static inline void clear_slob_page_free(struct page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree(sp);
}
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
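/*
 * Worked example (assuming a 4K page, so slobidx_t is s16 and SLOB_UNIT
 * is 2 bytes): SLOB_UNITS(100) = 50 units, while SLOB_UNITS(101) = 51
 * units; sizes are always rounded up to a whole number of units.
 */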
/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};
/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);
/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}
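/*
 * Encoding example (illustrative, following the slob_block comment above):
 * for a free block of 3 units at unit offset 10 whose next free block
 * starts at unit offset 50, s[0].units == 3 and s[1].units == 50. A
 * 1-unit free block at the same spot would instead store s[0].units == -50.
 */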
/*
 * Return the size of a slob block.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}
/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base + next;
}
/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
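/*
 * Explanatory note (not in the original): the last free block's "next"
 * offset is zero, so slob_next() returns the page-aligned base address,
 * whose low bits (addr & ~PAGE_MASK) are all zero; that is exactly what
 * the test above detects.
 */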
static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	struct page *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}
static void slob_free_pages(void *b, int order)
{
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}
/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->freelist = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->freelist = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
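/*
 * Alignment example (illustrative, assuming 2-byte units): for a request
 * of 8 units at align == 16, a free block whose start address ends in
 * 0x8 gives aligned == cur + 4 units and delta == 4, so a 4-unit head
 * fragment is carved off first and the object begins at "aligned".
 */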
/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(sp) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->freelist = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
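/*
 * Note on __GFP_ZERO handling (explanatory, not in the original text):
 * the flag is masked off before slob_new_pages() so the page allocator
 * does not zero an entire fresh page; only the object actually handed
 * out is zeroed by the memset() at the end of slob_alloc(), which is
 * all the caller is promised.
 */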
/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = slob_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		clear_slob_page(sp);
		free_slob_page(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->freelist = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < (slob_t *)sp->freelist) {
		if (b + units == sp->freelist) {
			units += slob_units(sp->freelist);
			sp->freelist = slob_next(sp->freelist);
		}
		set_slob(b, units, sp->freelist);
		sp->freelist = b;
	} else {
		prev = sp->freelist;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
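/*
 * Coalescing example (illustrative): if a 10-unit block is freed and the
 * block immediately after it in the page is already free, the two are
 * merged into one larger free block; the same happens with a free block
 * immediately before it, so adjacent free space never stays split.
 */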
/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	gfp &= gfp_allowed_mask;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(_RET_IP_, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}

		trace_kmalloc_node(_RET_IP_, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
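/*
 * Usage example (illustrative, assuming a 4-byte ARCH_KMALLOC_MINALIGN):
 * kmalloc(100, GFP_KERNEL) becomes slob_alloc(104, ...), the header word
 * records 100, and the caller receives the address 4 bytes past the
 * header; kfree() below reverses exactly this arithmetic.
 */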
void kfree(const void *block)
{
	struct page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(sp);
}
EXPORT_SYMBOL(kfree);
/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return SLOB_UNITS(*m) * SLOB_UNIT;
	} else
		return sp->private;
}
EXPORT_SYMBOL(ksize);
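/*
 * Example (illustrative, assuming 2-byte SLOB units): for a 101-byte
 * kmalloc allocation, ksize() reads the stored size of 101 and reports
 * SLOB_UNITS(101) * SLOB_UNIT == 102 bytes, the rounded-up usable size.
 */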
struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache),
		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
	return c;
}
EXPORT_SYMBOL(kmem_cache_create);
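/*
 * Usage sketch ("foo" and struct foo are hypothetical, for illustration
 * only): a cache created with
 *
 *	kmem_cache_create("foo", sizeof(struct foo), 0,
 *			  SLAB_HWCACHE_ALIGN, NULL);
 *
 * gets c->align == SLOB_ALIGN (L1_CACHE_BYTES), so every object handed
 * out by kmem_cache_alloc_node() below is cache-line aligned.
 */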
void kmem_cache_destroy(struct kmem_cache *c)
{
	kmemleak_free(c);
	if (c->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);
void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	flags &= gfp_allowed_mask;

	lockdep_trace_alloc(flags);

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	/* SLAB emulation: run the constructor on every allocation */
	if (c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}
static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}
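/*
 * Footer arithmetic, spelled out (explanatory, not in the original):
 * kmem_cache_free() writes the slob_rcu footer at
 * b + size - sizeof(struct slob_rcu), so given the rcu_head (which is
 * the start of that footer), subtracting size - sizeof(struct slob_rcu)
 * recovers the original object pointer b.
 */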
void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);
unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);
static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}

void __init kmem_cache_init_late(void)
{
	/* Nothing to do */
}