#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
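
/*
 * Illustrative sketch, not part of this header: mm/slub.c increments
 * these counters through a small private helper, roughly:
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		raw_cpu_inc(s->cpu_slab->stat[si]);
 *	#endif
 *	}
 *
 * The read-modify-write is racy on a preemptible kernel, which is
 * acceptable for statistics and avoids irq-disable overhead. With
 * CONFIG_SLUB_STATS enabled each item is exported via sysfs, e.g.
 * /sys/kernel/slab/<cache>/alloc_fastpath.
 */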

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
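
/*
 * Illustrative sketch, not part of this header: the lockless allocation
 * fastpath in mm/slub.c reads freelist and tid, then commits with a single
 * this_cpu_cmpxchg_double() over both fields, so an interleaved free,
 * preemption or cpu migration shows up as a tid mismatch and forces a
 * retry. Roughly:
 *
 *	object = c->freelist;
 *	tid = c->tid;
 *	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				     object, tid,
 *				     next_object, next_tid(tid)))
 *		goto redo;
 */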

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
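
/*
 * Illustrative sketch, not part of this header: mm/slub.c packs the page
 * order into the high bits of x and the object count into the low bits.
 * The helpers below mirror the private in-tree oo_order()/oo_objects()
 * (which use a 16-bit split) under hypothetical *_sketch names so they do
 * not clash with mm/slub.c.
 */
#define OO_SHIFT_SKETCH	16
#define OO_MASK_SKETCH	((1 << OO_SHIFT_SKETCH) - 1)

static inline int oo_order_sketch(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT_SKETCH;	/* high bits: page order */
}

static inline int oo_objects_sketch(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK_SKETCH;	/* low bits: objects per slab */
}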

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	unsigned long min_partial;
	int size;		/* The size of an object including metadata */
	int object_size;	/* The size of an object without metadata */
	int offset;		/* Free pointer offset */
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
	int red_left_pad;	/* Left redzone padding size */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
	int max_attr_size;	/* For propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif
	struct kmem_cache_node *node[MAX_NUMNODES];
};

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_remove(struct kmem_cache *);
#else
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
}
#endif

/**
 * virt_to_obj - returns the address of the beginning of an object.
 * @s: object's kmem_cache
 * @slab_page: address of slab page
 * @x: address within object memory range
 *
 * Returns the address of the beginning of the object containing @x.
 */
static inline void *virt_to_obj(struct kmem_cache *s,
				const void *slab_page,
				const void *x)
{
	return (void *)x - ((x - slab_page) % s->size);
}
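
/*
 * Worked example (hypothetical numbers): with s->size == 256, an address
 * x == slab_page + 0x130 lies 0x30 bytes into the second object, so
 * virt_to_obj(s, slab_page, x) returns slab_page + 0x100.
 */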

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x)
{
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;

	if (unlikely(object > last_object))
		return last_object;
	else
		return object;
}
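
/*
 * Unlike virt_to_obj(), nearest_obj() clamps the result to the last valid
 * object, so a pointer into a slab's unused tail does not round down to an
 * address past the final object. Worked example (hypothetical numbers):
 * with cache->size == 256 and page->objects == 15 on a 4K slab, the last
 * object starts at offset 0xe00 and the tail [0xf00, 0x1000) is unused;
 * x at offset 0xf80 would round down to 0xf00, so 0xe00 is returned.
 */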

#endif /* _LINUX_SLUB_DEF_H */