/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

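/*
 * Illustrative sketch (not part of this header) of how ZERO_SIZE_PTR behaves
 * at call sites: a zero-length kmalloc() yields ZERO_SIZE_PTR rather than
 * NULL, so a NULL check alone does not catch the degenerate case;
 * ZERO_OR_NULL_PTR() covers both. kfree() accepts either value as a no-op.
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// returns ZERO_SIZE_PTR
 *
 *	if (ZERO_OR_NULL_PTR(p))		// true for NULL and ZERO_SIZE_PTR
 *		;				// nothing usable was allocated
 *
 *	kfree(p);				// no-op for ZERO_SIZE_PTR and NULL
 */
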
#include <linux/kmemleak.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
			unsigned long, void (*)(void *), struct kmem_cache *);
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

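/*
 * Illustrative usage sketch (hypothetical "struct foo" and cache, not part
 * of this header): create a cache with KMEM_CACHE(), then allocate and free
 * objects from it. SLAB_PANIC spares the caller a NULL check at boot time.
 *
 *	struct foo {
 *		spinlock_t lock;
 *		int refcnt;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	void __init foo_init(void)
 *	{
 *		foo_cachep = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *	}
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 */
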
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2) and otherwise passes the request to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests of page size and larger to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_MAX	30
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

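/*
 * Worked example (assuming a 4K page, i.e. PAGE_SHIFT == 12, with SLUB):
 * KMALLOC_SHIFT_HIGH = 13, so KMALLOC_MAX_CACHE_SIZE = 8192 bytes; any
 * larger kmalloc() is handed straight to the page allocator, up to
 * KMALLOC_MAX_SIZE = 1UL << (MAX_ORDER + PAGE_SHIFT).
 */
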
/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */

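/*
 * Illustrative mapping (assuming KMALLOC_MIN_SIZE == 8, the SLUB default;
 * not part of the API): kmalloc_index() selects the kmalloc_caches[] slot
 * whose object size is the next size class at or above the request, e.g.:
 *
 *	kmalloc_index(8)    == 3	// the 8-byte cache
 *	kmalloc_index(100)  == 7	// rounded up to the 128-byte cache
 *	kmalloc_index(136)  == 2	// fits the odd-sized 192-byte cache
 *	kmalloc_index(4096) == 12	// one page, with 4K pages
 */
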
void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	return kmem_cache_alloc(s, flags);
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif /* CONFIG_TRACING */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;

	flags |= (__GFP_COMP | __GFP_KMEMCG);
	ret = (void *) __get_free_pages(flags, order);
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user. May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
						      flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}

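/*
 * Minimal usage sketch (hypothetical "struct reply", not part of this
 * header): allocate, check for failure, free. kmalloc() returns NULL on
 * failure (or ZERO_SIZE_PTR for a zero-length request), never a partial
 * allocation.
 *
 *	struct reply *r = kmalloc(sizeof(*r), GFP_KERNEL);
 *	if (!r)
 *		return -ENOMEM;
 *	...
 *	kfree(r);
 */
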
/*
 * Determine the size used for the nth kmalloc cache.
 * Returns the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

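/*
 * Illustrative sketch (hypothetical per-CPU setup code, "struct node_ctx"
 * is not part of this header): allocate memory on the node a given CPU
 * belongs to, so the data that CPU touches most stays node-local.
 * Passing NUMA_NO_NODE instead lets the allocator pick any node.
 *
 *	int node = cpu_to_node(cpu);
 *	struct node_ctx *c = kmalloc_node(sizeof(*c), GFP_KERNEL, node);
 *	if (!c)
 *		return -ENOMEM;
 */
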
/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif
/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * struct kmem_cache will hold a pointer to it, so the memory cost while
 * disabled is 1 pointer. The runtime cost while enabled gets bigger than it
 * would otherwise be if it were bundled in kmem_cache: we'd need an
 * extra pointer chase. But the trade off clearly lies in favor of not
 * penalizing non-users.
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @list: list_head for the list of all caches in this memcg
 * @root_cache: pointer to the global, root cache, this cache was derived from
 * @dead: set to true after the memcg dies; the cache may still be around.
 * @nr_pages: number of pages that belong to this cache.
 * @destroy: worker to be called whenever we are ready, or believe we may be
 *           ready, to destroy this cache.
 */
struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct kmem_cache *memcg_caches[0];
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			bool dead;
			atomic_t nr_pages;
			struct work_struct destroy;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

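/*
 * Why the overflow check matters (illustrative; "struct entry" and "nr" are
 * hypothetical, e.g. a count taken from userspace): a naive
 * kmalloc(nr * sizeof(*tbl), ...) can wrap around in size_t arithmetic and
 * silently allocate a short buffer. kmalloc_array() returns NULL instead
 * when n * size would overflow.
 *
 *	struct entry *tbl = kmalloc_array(nr, sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;	// covers both OOM and n*size overflow
 */
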
/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */

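/*
 * Illustrative sketch (hypothetical kstrdup-style helper, not part of this
 * header): a widely-used wrapper should charge allocations to *its* caller
 * rather than to the wrapper itself, so slab debugging output points at the
 * real culprit.
 *
 *	char *my_strdup(const char *s, gfp_t gfp)
 *	{
 *		size_t len = strlen(s) + 1;
 *		char *p = kmalloc_track_caller(len, gfp);
 *
 *		if (p)
 *			memcpy(p, s, len);
 *		return p;
 *	}
 */
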
#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

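/*
 * Illustrative note (hypothetical "struct ctx"): kzalloc() is preferable to
 * kmalloc()+memset() when every field should start out zero/NULL, e.g. so
 * that an error path can blindly kfree() not-yet-populated members.
 *
 *	struct ctx *c = kzalloc(sizeof(*c), GFP_KERNEL);
 *	if (!c)
 *		return -ENOMEM;
 *	// c->buf, c->name, ... are all NULL until assigned
 */
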
/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */