include/linux/slab_def.h
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head next;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the
	 * total object size including these internal fields; the following
	 * two variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init()).
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 */
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS];
	/*
	 * Do not add fields after array[]
	 */
};
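/*
 * Usage sketch (not part of this header): callers never poke at struct
 * kmem_cache fields directly.  A typical driver creates a dedicated cache
 * with kmem_cache_create() (declared in <linux/slab.h>) and then goes
 * through kmem_cache_alloc()/kmem_cache_free().  The "foo" names below are
 * invented for the example.
 */
#include <linux/slab.h>
#include <linux/errno.h>

struct foo {
	unsigned long id;
	char tag[16];
};

static struct kmem_cache *foo_cachep;

static int foo_cache_init(void)
{
	/* object size comes from the type; no constructor needed */
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static struct foo *foo_alloc(void)
{
	/* fast path is served from the per-cpu array_cache described above */
	return kmem_cache_alloc(foo_cachep, GFP_KERNEL);
}

static void foo_free(struct foo *f)
{
	kmem_cache_free(foo_cachep, f);
}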

/* Size description struct for general caches. */
struct cache_sizes {
	size_t cs_size;
	struct kmem_cache *cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache *cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];

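/*
 * Illustrative helper (not part of this header): malloc_sizes[] is sorted by
 * ascending cs_size, and a request is served by the first general cache that
 * is large enough.  kmalloc() below performs this walk at compile time via
 * the CACHE() macro; for non-constant sizes __kmalloc() does the equivalent
 * lookup at run time in mm/slab.c.  This sketch assumes the caller passes the
 * number of valid table entries; the name example_size_to_cache() is made up.
 */
static inline struct kmem_cache *example_size_to_cache(size_t size, gfp_t flags,
						const struct cache_sizes *tbl,
						int nr_entries)
{
	int i;

	for (i = 0; i < nr_entries; i++) {
		if (size > tbl[i].cs_size)
			continue;
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			return tbl[i].cs_dmacachep;
#endif
		return tbl[i].cs_cachep;
	}
	return NULL;	/* larger than the biggest general cache */
}
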
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(size_t size,
				    struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_trace(size, cachep, flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
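
/*
 * Usage sketch: with a compile-time-constant size the if/goto chain above
 * folds down to a single malloc_sizes[] index and a direct
 * kmem_cache_alloc_trace() call, while a run-time size falls through to
 * __kmalloc().  The sizes and names below are invented for the example.
 */
#include <linux/slab.h>
#include <linux/string.h>

static void *example_fixed_and_variable(size_t runtime_len)
{
	char *hdr, *payload;

	/* constant size: general cache selected at compile time */
	hdr = kmalloc(64, GFP_KERNEL);
	if (!hdr)
		return NULL;

	/* non-constant size: resolved at run time via __kmalloc() */
	payload = kmalloc(runtime_len, GFP_KERNEL);
	if (!payload) {
		kfree(hdr);
		return NULL;
	}

	memset(hdr, 0, 64);
	kfree(hdr);
	return payload;
}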

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(size_t size,
					 struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(size_t size,
			    struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
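
/*
 * NUMA usage sketch: kmalloc_node() takes the same compile-time shortcut as
 * kmalloc() but asks for memory from a specific node, e.g. the node returned
 * by numa_node_id() for the current CPU.  The struct and function names are
 * invented for the example.
 */
#include <linux/slab.h>

struct pernode_stats {
	unsigned long hits;
	unsigned long misses;
};

static struct pernode_stats *example_alloc_stats_on(int nid)
{
	/* constant size, zeroed, placed on the requested node */
	return kmalloc_node(sizeof(struct pernode_stats),
			    GFP_KERNEL | __GFP_ZERO, nid);
}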

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */