#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
	/*
	 * NOTE: the LSB of head.next is set by mm_take_all_locks()
	 * _after_ taking the above lock. So the head must only be
	 * read/written after taking the above lock, to be sure to see
	 * a valid next pointer. The LSB itself is serialized by a
	 * system-wide lock only visible to mm_take_all_locks()
	 * (mm_all_locks_mutex).
	 */
	struct list_head head;	/* List of private "related" vmas */
};
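
/*
 * For an anonymous page, page->mapping stores this anon_vma pointer
 * with the PAGE_MAPPING_ANON low bit set (PAGE_MAPPING_ANON is
 * defined in linux/mm.h). A sketch of how mm/rmap.c recovers it,
 * roughly as page_lock_anon_vma() does:
 *
 *	unsigned long mapping = (unsigned long)page->mapping;
 *
 *	if (mapping & PAGE_MAPPING_ANON)
 *		anon_vma = (struct anon_vma *)
 *				(mapping - PAGE_MAPPING_ANON);
 */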

#ifdef CONFIG_MMU

extern struct kmem_cache *anon_vma_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}
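
/*
 * Usage sketch: an anon rmap walk (as in page_referenced_anon() in
 * mm/rmap.c) takes the lock and iterates the vmas linked through
 * vm_area_struct's anon_vma_node, roughly:
 *
 *	spin_lock(&anon_vma->lock);
 *	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
 *		address = vma_address(page, vma);	(then per-vma work)
 *	spin_unlock(&anon_vma->lock);
 *
 * (vma_address() is a helper local to mm/rmap.c.)
 */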

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);

/*
 * rmap interfaces called when adding or removing the pte of a page.
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *, struct vm_area_struct *);

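/*
 * Usage sketch (illustrative, after the mm/memory.c callers): a fault
 * handler pairs these with the pte operations, roughly:
 *
 *	page_add_new_anon_rmap(page, vma, address);	(new anon page)
 *	set_pte_at(mm, address, pte, entry);
 *
 * and the teardown path, after clearing the pte:
 *
 *	page_remove_rmap(page, vma);
 */
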
#ifdef CONFIG_DEBUG_VM
void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
		   unsigned long address);
#else
static inline void page_dup_rmap(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
{
	atomic_inc(&page->_mapcount);
}
#endif
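
/*
 * Sketch: page_dup_rmap() is meant for the fork path; when
 * copy_page_range() duplicates a pte that maps a page, it takes an
 * extra reference on the mapping, roughly:
 *
 *	get_page(page);
 *	page_dup_rmap(page, vma, addr);
 *
 * (as done by copy_one_pte() in mm/memory.c).
 */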

/*
 * Called from mm/vmscan.c to handle paging out.
 */
int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
int try_to_unmap(struct page *, int ignore_refs);
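
/*
 * Reclaim sketch (illustrative): shrink_page_list() style usage is
 * roughly
 *
 *	if (page_referenced(page, 1, sc->mem_cgroup))
 *		goto activate;			(recently used, keep it)
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_FAIL:  goto activate;
 *	case SWAP_AGAIN: goto keep_locked;
 *	case SWAP_SUCCESS: break;		(all ptes gone, can free)
 *	}
 *
 * where sc is vmscan's scan control, and the SWAP_* return codes are
 * defined at the end of this file.
 */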

/*
 * Called from mm/filemap_xip.c to unmap the empty zero page.
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
			  unsigned long, spinlock_t **);
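
/*
 * Usage sketch: page_check_address() returns the pte mapped and
 * locked on success, so the caller must drop both, e.g.
 *
 *	pte = page_check_address(page, mm, address, &ptl);
 *	if (pte) {
 *		(inspect or modify the pte)
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */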

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings (and since clean PTEs should
 * also be read-only, write-protects them too).
 *
 * Returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);
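
/*
 * Sketch (after the mm/page-writeback.c caller): when clearing a
 * page's dirty bit for writeback, any still-writable ptes are
 * write-protected, and the page is re-dirtied if some pte had
 * dirtied it meanwhile, roughly:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */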

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

#define page_referenced(page, l, cnt)	TestClearPageReferenced(page)
#define try_to_unmap(page, refs)	SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap().
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
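
/*
 * Roughly: SWAP_SUCCESS means every pte mapping the page was removed,
 * SWAP_AGAIN means some mappings remain (the caller may retry), and
 * SWAP_FAIL means the page cannot be unmapped (e.g. it is mapped into
 * a VM_LOCKED vma).
 */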

#endif	/* _LINUX_RMAP_H */