/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <linux/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (eg empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for its own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space, they need to be kmapped separately for doing IO on the pages. The
 * struct page (these bits with information) are always mapped into kernel
 * address space...
 *
 * PG_buddy is set to indicate that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */

/*
 * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,		/* I/O error occurred on this page */
	PG_referenced,		/* Used with PG_reclaim for page reclaim */
	PG_uptodate,		/* Contents are valid; see PageUptodate() */
	PG_dirty,
	PG_lru,
	PG_active,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use*/
	PG_arch_1,		/* Architecture specific page state bit */
	PG_reserved,		/* Special page, never swapped out */
	PG_private,		/* If pagecache, has fs-private data */
	PG_writeback,		/* Page is under writeback */
#ifdef CONFIG_PAGEFLAGS_EXTENDED
	PG_head,		/* A head page */
	PG_tail,		/* A tail page */
#else
	PG_compound,		/* A compound page */
#endif
	PG_swapcache,		/* Swap page: swp_entry_t in private */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_buddy,		/* Page is free, on buddy lists */
#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
	PG_uncached,		/* Page has been mapped as uncached */
#endif
	__NR_PAGEFLAGS		/* Count of flag bits, not a real flag */
};
1da177e4 | 101 | |
#ifndef __GENERATING_BOUNDS_H

/*
 * Macros to create function definitions for page flags
 *
 * "uname" is the CamelCase suffix of the generated accessor (e.g.
 * Dirty -> PageDirty()); "lname" is the lower-case suffix of the
 * corresponding PG_* bit (e.g. dirty -> PG_dirty).  The double-
 * underscore variants expand to the non-atomic __set_bit()/__clear_bit()
 * bitops.
 */

/* Page##uname(): test the flag */
#define TESTPAGEFLAG(uname, lname)					\
static inline int Page##uname(struct page *page)			\
			{ return test_bit(PG_##lname, &page->flags); }

/* SetPage##uname(): set the flag with set_bit() */
#define SETPAGEFLAG(uname, lname)					\
static inline void SetPage##uname(struct page *page)			\
			{ set_bit(PG_##lname, &page->flags); }

/* ClearPage##uname(): clear the flag with clear_bit() */
#define CLEARPAGEFLAG(uname, lname)					\
static inline void ClearPage##uname(struct page *page)			\
			{ clear_bit(PG_##lname, &page->flags); }

/* __SetPage##uname(): non-atomic variant of SetPage##uname() */
#define __SETPAGEFLAG(uname, lname)					\
static inline void __SetPage##uname(struct page *page)			\
			{ __set_bit(PG_##lname, &page->flags); }

/* __ClearPage##uname(): non-atomic variant of ClearPage##uname() */
#define __CLEARPAGEFLAG(uname, lname)					\
static inline void __ClearPage##uname(struct page *page)		\
			{ __clear_bit(PG_##lname, &page->flags); }

/* TestSetPage##uname(): set the flag, return the old value */
#define TESTSETFLAG(uname, lname)					\
static inline int TestSetPage##uname(struct page *page)			\
		{ return test_and_set_bit(PG_##lname, &page->flags); }

/* TestClearPage##uname(): clear the flag, return the old value */
#define TESTCLEARFLAG(uname, lname)					\
static inline int TestClearPage##uname(struct page *page)		\
		{ return test_and_clear_bit(PG_##lname, &page->flags); }


/* Test, set and clear accessors in one go */
#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)		\
	SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)

/* Test accessor plus the non-atomic set/clear pair */
#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)		\
	__SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)

/* Page##uname() stub that always returns false (flag not configured) */
#define PAGEFLAG_FALSE(uname)						\
static inline int Page##uname(struct page *page)			\
			{ return 0; }

/* Test-and-set plus test-and-clear accessors */
#define TESTSCFLAG(uname, lname)					\
	TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
148 | ||
struct page;	/* forward declaration */

/* Generate the standard accessor sets for each flag */
PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked)
PAGEFLAG(Error, error)
PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
__PAGEFLAG(Slab, slab)
PAGEFLAG(Checked, owner_priv_1)		/* Used by some filesystems */
PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */
PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
	__SETPAGEFLAG(Private, private)

/*
 * Only test-and-set exist for PG_writeback. The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
__PAGEFLAG(Buddy, buddy)
PAGEFLAG(MappedToDisk, mappedtodisk)

/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
PAGEFLAG(Readahead, reclaim)		/* Reminder to do async read-ahead */

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem(page_zone(__p))
#else
PAGEFLAG_FALSE(HighMem)			/* No highmem: always false */
#endif

#ifdef CONFIG_SWAP
PAGEFLAG(SwapCache, swapcache)
#else
PAGEFLAG_FALSE(SwapCache)		/* No swap: always false */
#endif

#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
PAGEFLAG(Uncached, uncached)
#else
PAGEFLAG_FALSE(Uncached)		/* PG_uncached bit not allocated */
#endif
1da177e4 | 197 | |
0ed361de NP |
198 | static inline int PageUptodate(struct page *page) |
199 | { | |
200 | int ret = test_bit(PG_uptodate, &(page)->flags); | |
201 | ||
202 | /* | |
203 | * Must ensure that the data we read out of the page is loaded | |
204 | * _after_ we've loaded page->flags to check for PageUptodate. | |
205 | * We can skip the barrier if the page is not uptodate, because | |
206 | * we wouldn't be reading anything from it. | |
207 | * | |
208 | * See SetPageUptodate() for the other side of the story. | |
209 | */ | |
210 | if (ret) | |
211 | smp_rmb(); | |
212 | ||
213 | return ret; | |
214 | } | |
215 | ||
/*
 * Non-atomic variant of SetPageUptodate(): uses __set_bit().
 */
static inline void __SetPageUptodate(struct page *page)
{
	/*
	 * As in SetPageUptodate(): stores that filled the page must be
	 * visible before PG_uptodate becomes visible.
	 */
	smp_wmb();
	__set_bit(PG_uptodate, &(page)->flags);
#ifdef CONFIG_S390
	/* NOTE(review): presumably resets s390 storage-key dirty state — see page_clear_dirty() */
	page_clear_dirty(page);
#endif
}
224 | ||
/*
 * Mark the page uptodate, i.e. its contents are now valid (typically
 * after a read has completed).
 */
static inline void SetPageUptodate(struct page *page)
{
#ifdef CONFIG_S390
	/* Only clear the dirty state on the 0 -> 1 transition of the bit. */
	if (!test_and_set_bit(PG_uptodate, &page->flags))
		page_clear_dirty(page);
#else
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 *
	 * s390 doesn't need an explicit smp_wmb here because the test and
	 * set bit already provides full barriers.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &(page)->flags);
#endif
}
243 | ||
CLEARPAGEFLAG(Uptodate, uptodate)	/* ClearPageUptodate() */

extern void cancel_dirty_page(struct page *page, unsigned int account_size);

/* Defined out of line; both return the previous PG_writeback state. */
int test_clear_page_writeback(struct page *page);
int test_set_page_writeback(struct page *page);
1da177e4 | 250 | |
/*
 * Mark the page as under writeback; the previous flag state is of no
 * interest to the caller here.
 */
static inline void set_page_writeback(struct page *page)
{
	(void)test_set_page_writeback(page);
}
1da177e4 | 255 | |
#ifdef CONFIG_PAGEFLAGS_EXTENDED
/*
 * System with lots of page flags available. This allows separate
 * flags for PageHead() and PageTail() checks of compound pages so that bit
 * tests can be used in performance sensitive paths. PageCompound is
 * generally not used in hot code paths.
 */
__PAGEFLAG(Head, head)		/* PageHead(), __SetPageHead(), __ClearPageHead() */
__PAGEFLAG(Tail, tail)		/* PageTail(), __SetPageTail(), __ClearPageTail() */
266 | static inline int PageCompound(struct page *page) | |
267 | { | |
268 | return page->flags & ((1L << PG_head) | (1L << PG_tail)); | |
269 | ||
270 | } | |
#else
/*
 * Reduce page flag use as much as possible by overlapping
 * compound page flags with the flags used for page cache pages. Possible
 * because PageCompound is always set for compound pages and not for
 * pages on the LRU and/or pagecache.
 */
TESTPAGEFLAG(Compound, compound)
__PAGEFLAG(Head, compound)	/* head accessors reuse the PG_compound bit */

/*
 * PG_reclaim is used in combination with PG_compound to mark the
 * head and tail of a compound page. This saves one page flag
 * but makes it impossible to use compound pages for the page cache.
 * The PG_reclaim bit would have to be used for reclaim or readahead
 * if compound pages enter the page cache.
 *
 * PG_compound & PG_reclaim	=> Tail page
 * PG_compound & ~PG_reclaim	=> Head page
 */
#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
292 | ||
6a1e7f77 CL |
293 | static inline int PageTail(struct page *page) |
294 | { | |
295 | return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask); | |
296 | } | |

static inline void __SetPageTail(struct page *page)
{
	/* Non-atomic: sets PG_compound and PG_reclaim in one store. */
	page->flags |= PG_head_tail_mask;
}

static inline void __ClearPageTail(struct page *page)
{
	/* Non-atomic: clears PG_compound and PG_reclaim in one store. */
	page->flags &= ~PG_head_tail_mask;
}

#endif /* !PAGEFLAGS_EXTENDED */
#endif /* !__GENERATING_BOUNDS_H */
#endif /* PAGE_FLAGS_H */