#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	NR_SLUB_STAT_ITEMS };
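
/*
 * Illustrative note, not part of the original header: when
 * CONFIG_SLUB_STATS is enabled, mm/slub.c bumps one of these counters
 * whenever the matching event occurs, roughly as in this sketch:
 *
 *	static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		c->stat[si]++;
 *	#endif
 *	}
 *
 * The per cpu counters are then exported per cache through sysfs.
 */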

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to first free per cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
	unsigned int offset;	/* Freepointer offset (in word units) */
	unsigned int objsize;	/* Size of an object (from kmem_cache) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
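
/*
 * Illustrative sketch, not part of the original header: free objects
 * store the address of the next free object inside themselves, at word
 * offset c->offset. The allocation fastpath in mm/slub.c pops the head
 * of the per cpu freelist roughly like the hypothetical helper below;
 * the real code (slab_alloc()) also checks node locality and falls back
 * to __slab_alloc() when the freelist is empty.
 */
static inline void *slub_example_pop(struct kmem_cache_cpu *c)
{
	void **object = c->freelist;

	if (!object)
		return NULL;	/* slowpath would refill from c->page */
	c->freelist = object[c->offset];	/* link to next free object */
	return object;
}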

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
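
/*
 * Illustrative sketch, not part of the original header: mm/slub.c packs
 * the page order into the high bits of x and the object count into the
 * low bits, so both can be read or updated with a single word access.
 * Assuming the 16 bit split used there (hypothetical names below), the
 * helpers look roughly like this:
 */
#define SLUB_EXAMPLE_OO_SHIFT	16
#define SLUB_EXAMPLE_OO_MASK	((1 << SLUB_EXAMPLE_OO_SHIFT) - 1)

static inline struct kmem_cache_order_objects
slub_example_oo_make(int order, unsigned long size)
{
	struct kmem_cache_order_objects x = {
		(order << SLUB_EXAMPLE_OO_SHIFT) + (PAGE_SIZE << order) / size
	};
	return x;
}

static inline int slub_example_oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> SLUB_EXAMPLE_OO_SHIFT;	/* high bits: page order */
}

static inline int slub_example_oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & SLUB_EXAMPLE_OO_MASK;	/* low bits: object count */
}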

/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	struct kmem_cache_order_objects oo;

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(struct kmem_cache *, void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
#ifdef CONFIG_SMP
	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
#else
	struct kmem_cache_cpu cpu_slab;
#endif
};

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 * int i;
 * for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *	if (size <= (1 << i))
 *		return i;
 */
}
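
/*
 * Worked examples, not in the original source, assuming a KMALLOC_MIN_SIZE
 * of 8: kmalloc_index(6) == 3 (rounded up to the 8 byte cache),
 * kmalloc_index(96) == 1 (the special 96 byte cache), and
 * kmalloc_index(200) == 8 (the 256 byte cache). Because the function is
 * __always_inline and size is normally a compile time constant, the whole
 * chain of comparisons folds down to a single constant at compile time.
 */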

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return &kmalloc_caches[index];
}

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size));
}
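
/*
 * Worked example, not in the original source, assuming 4k pages: a
 * request for 70000 bytes gives get_order(70000) == 5, i.e. 32 contiguous
 * pages (131072 bytes), since 16 pages (65536 bytes) would be too small.
 * __GFP_COMP makes the pages a single compound page so that kfree() can
 * later free the whole allocation as one unit.
 */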

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > PAGE_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc(s, flags);
		}
	}
	return __kmalloc(size, flags);
}
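
/*
 * Illustrative usage, not part of the original header, assuming a
 * KMALLOC_MIN_SIZE of 8:
 *
 *	buf = kmalloc(64, GFP_KERNEL);	// constant size: kmalloc_index()
 *					// folds to 6 and this compiles down
 *					// to kmem_cache_alloc() on
 *					// kmalloc_caches[6]
 *	buf = kmalloc(len, GFP_KERNEL);	// runtime size: takes the out of
 *					// line __kmalloc() path
 */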

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) &&
	    size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node(s, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
#endif

#endif /* _LINUX_SLUB_DEF_H */