#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/prio_tree.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

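/*
 * Illustrative sketch (not part of the header): why the factor of two in
 * AT_VECTOR_SIZE. Each auxiliary vector entry is an (id, value) pair of
 * unsigned longs, and one extra pair is reserved for the AT_NULL
 * terminator. The concrete AT_VECTOR_SIZE_BASE value below is an
 * assumption for the example; the real value lives in <linux/auxvec.h>
 * and varies by kernel version.
 */
#if 0
/* Assuming AT_VECTOR_SIZE_ARCH == 0 and AT_VECTOR_SIZE_BASE == 19,
 * AT_VECTOR_SIZE == 2 * (0 + 19 + 1) == 40 unsigned longs: room for the
 * base entries, any arch extras, and the terminating AT_NULL pair. */
static unsigned long example_saved_auxv[AT_VECTOR_SIZE];
#endif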
struct address_space;

#define USE_SPLIT_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)

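/*
 * Illustrative sketch (not part of the header): what USE_SPLIT_PTLOCKS
 * selects. With split locks, each page-table page carries its own
 * spinlock in struct page's 'ptl' field (see below); otherwise every
 * page table in an mm shares mm->page_table_lock. This mirrors the
 * pte_lockptr() logic in <linux/mm.h>, simplified here; the
 * 'example_' name is hypothetical.
 */
#if 0
#if USE_SPLIT_PTLOCKS
/* The lock lives in the struct page of the page-table page itself. */
#define example_pte_lockptr(mm, pmd)	(&pmd_page(*(pmd))->ptl)
#else
/* One coarse lock per address space. */
#define example_pte_lockptr(mm, pmd)	(&(mm)->page_table_lock)
#endif
#endif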
/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page. That is currently only used by slub but the arrangement
 * allows the use of atomic double word operations on the flags/mapping
 * and lru list pointers also.
 */
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	struct address_space *mapping;	/* If low bit clear, points to
					 * inode address_space, or NULL.
					 * If page mapped as anonymous
					 * memory, low bit is set, and
					 * it points to anon_vma object:
					 * see PAGE_MAPPING_ANON below.
					 */
	/* Second double word */
	struct {
		union {
			pgoff_t index;	/* Our offset within mapping. */
			void *freelist;	/* slub first free object */
		};

		union {
			atomic_t _mapcount;	/* Count of ptes mapped in mms,
						 * to show when page is mapped
						 * & limit reverse map searches.
						 */

			/* Used for cmpxchg_double in slub */
			unsigned long counters;
			struct {
				unsigned inuse:16;
				unsigned objects:15;
				unsigned frozen:1;
			};
		};
		atomic_t _count;	/* Usage count, see below. */
	};

	/* Third double word block */
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */

	/* Remainder is not double word aligned */
	union {
		unsigned long private;	/* Mapping-private opaque data:
					 * usually used for buffer_heads
					 * if PagePrivate set; used for
					 * swp_entry_t if PageSwapCache;
					 * indicates order in the buddy
					 * system if PG_buddy is set.
					 */
#if USE_SPLIT_PTLOCKS
		spinlock_t ptl;
#endif
		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
		struct page *first_page;	/* Compound tail pages */
	};

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
	unsigned long debug_flags;	/* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif
}
/*
 * If another subsystem starts using the double word pairing for atomic
 * operations on struct page then it must change the #if to ensure
 * proper alignment of the page struct.
 */
#if defined(CONFIG_SLUB) && defined(CONFIG_CMPXCHG_LOCAL)
	__attribute__((__aligned__(2*sizeof(unsigned long))))
#endif
;
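/*
 * Illustrative sketch (not part of the header): how SLUB exploits the
 * double word blocks above. 'freelist' and 'counters' sit side by side
 * in the second double word, so both can be updated in one atomic
 * cmpxchg_double(), which is why the 2*sizeof(unsigned long) alignment
 * is enforced. This is modelled on __cmpxchg_double_slab() in
 * mm/slub.c; the helper name is hypothetical and the exact
 * cmpxchg_double() calling convention has changed across kernel
 * versions (the six-argument form is assumed here).
 */
#if 0
static inline bool example_slab_update(struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new)
{
	/* Atomically swap both words, or neither. */
	return cmpxchg_double(&page->freelist, &page->counters,
			      freelist_old, counters_old,
			      freelist_new, counters_new);
}
#endif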

typedef unsigned long __nocast vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions. These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node vm_rb;		/* link in global region tree */
	vm_flags_t vm_flags;		/* VMA vm_flags */
	unsigned long vm_start;		/* start address of region */
	unsigned long vm_end;		/* region initialised to here */
	unsigned long vm_top;		/* region allocated to here */
	unsigned long vm_pgoff;		/* the offset in vm_file corresponding to vm_start */
	struct file *vm_file;		/* the backing file or NULL */

	int vm_usage;			/* region usage count (access under nommu_region_sem) */
	bool vm_icache_flushed : 1;	/* true if the icache has been flushed for
					 * this region */
};
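/*
 * Illustrative sketch (not part of the header): how a NOMMU region can
 * be looked up through its vm_rb linkage in the global tree kept by
 * mm/nommu.c (protected there by nommu_region_sem). The helper below is
 * hypothetical and simplified; rb_entry() is the real rbtree accessor.
 */
#if 0
static struct vm_region *example_find_region(struct rb_root *root,
					     unsigned long addr)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct vm_region *region = rb_entry(n, struct vm_region, vm_rb);

		if (addr < region->vm_start)
			n = n->rb_left;
		else if (addr >= region->vm_end)
			n = n->rb_right;
		else
			return region;	/* addr falls inside this region */
	}
	return NULL;
}
#endif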

/*
 * This struct defines a virtual memory (VM) area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* The address space we belong to. */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	struct rb_node vm_rb;

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap prio tree, or
	 * linkage to the list of like vmas hanging off its node, or
	 * linkage of vma in the address_space->i_mmap_nonlinear list.
	 */
	union {
		struct {
			struct list_head list;
			void *parent;	/* aligns with prio_tree_node parent */
			struct vm_area_struct *head;
		} vm_set;

		struct raw_prio_tree_node prio_tree_node;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages. A MAP_SHARED vma
	 * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
};
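/*
 * Illustrative sketch (not part of the header): a process's VMAs are
 * reachable both through the sorted vm_next list and the mm_rb rbtree;
 * readers take mmap_sem for reading before walking either. The helper
 * below is hypothetical; mm->mmap, vm_next and down_read()/up_read()
 * are the real kernel interfaces.
 */
#if 0
static unsigned long example_total_vma_span(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long span = 0;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		span += vma->vm_end - vma->vm_start;
	up_read(&mm->mmap_sem);

	return span;
}
#endif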

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};
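/*
 * Illustrative sketch (not part of the header): how core_state
 * synchronizes a coredump. Each thread sharing the mm decrements
 * nr_threads on its way out; the last one wakes the dumper through the
 * 'startup' completion. This mirrors the pattern used by exit_mm() in
 * kernel/exit.c, reduced to its essentials; the function name is
 * hypothetical.
 */
#if 0
static void example_coredump_exit(struct core_state *core_state)
{
	/* The last exiting thread releases the waiting dumper. */
	if (atomic_dec_and_test(&core_state->nr_threads))
		complete(&core_state->startup);
}
#endif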

enum {
	MM_FILEPAGES,
	MM_ANONPAGES,
	MM_SWAPENTS,
	NR_MM_COUNTERS
};

#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information, */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTLOCKS */

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};

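/*
 * Illustrative sketch (not part of the header): with SPLIT_RSS_COUNTING,
 * tasks batch RSS updates in their task_rss_stat and only fold them into
 * the atomic mm_rss_stat counters periodically, avoiding atomic traffic
 * on every fault. The fold below follows the shape of the sync helpers
 * in mm/memory.c; the 'example_' name is hypothetical.
 */
#if 0
static void example_sync_rss(struct task_struct *task, struct mm_struct *mm)
{
	int i;

	for (i = 0; i < NR_MM_COUNTERS; i++) {
		if (task->rss_stat.count[i]) {
			atomic_long_add(task->rss_stat.count[i],
					&mm->rss_stat.count[i]);
			task->rss_stat.count[i] = 0;
		}
	}
	task->rss_stat.events = 0;
}
#endif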
struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long task_size;		/* size of task vm space */
	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's. These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */


	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	/* Swap token stuff */
	/*
	 * Last value of global fault stamp as seen by this process.
	 * In other words, this value gives an indication of how long
	 * it has been since this task got the token.
	 * Look at mm/thrash.c
	 */
	unsigned int faultstamp;
	unsigned int token_priority;
	unsigned int last_interval;

	/* How many tasks sharing this mm are OOM_DISABLE */
	atomic_t oom_disable_count;

	unsigned long flags;			/* Must use atomic bitops to access the bits */

	struct core_state *core_state;		/* coredumping support */
#ifdef CONFIG_AIO
	spinlock_t ioctx_lock;
	struct hlist_head ioctx_list;
#endif
#ifdef CONFIG_MM_OWNER
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file *exe_file;
	unsigned long num_exe_file_vmas;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pgtable_t pmd_huge_pte;			/* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
};
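/*
 * Illustrative sketch (not part of the header): the two reference counts
 * above have different lifetimes. mm_users counts users of the address
 * space itself, while mm_count keeps the bare struct mm_struct alive
 * (all mm_users collectively hold one mm_count reference). The helper is
 * hypothetical; atomic_inc(), mmput() and mmdrop() are the kernel
 * primitives of this era.
 */
#if 0
static void example_use_mm_briefly(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);	/* pin the address space */
	/* ... walk VMAs, read statistics, etc. ... */
	mmput(mm);			/* may tear down the address space */

	atomic_inc(&mm->mm_count);	/* pin only the struct itself */
	/* ... VMAs and page tables may already be gone here ... */
	mmdrop(mm);			/* may free the struct mm_struct */
}
#endif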

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}
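/*
 * Illustrative sketch (not part of the header): mm_init_cpumask() must
 * run before the mask is used (with CONFIG_CPUMASK_OFFSTACK it points
 * cpu_vm_mask_var at the trailing cpumask_allocation), after which
 * arch context-switch code marks CPUs in and out of the mask. The
 * caller below is hypothetical; cpumask_set_cpu() is the real API.
 */
#if 0
static void example_switch_in(struct mm_struct *next, unsigned int cpu)
{
	/* Record that this CPU may now hold TLB entries for 'next'. */
	cpumask_set_cpu(cpu, mm_cpumask(next));
}
#endif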

#endif /* _LINUX_MM_TYPES_H */