Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _LINUX_SWAP_H |
2 | #define _LINUX_SWAP_H | |
3 | ||
1da177e4 LT |
4 | #include <linux/spinlock.h> |
5 | #include <linux/linkage.h> | |
6 | #include <linux/mmzone.h> | |
7 | #include <linux/list.h> | |
66e1707b | 8 | #include <linux/memcontrol.h> |
1da177e4 | 9 | #include <linux/sched.h> |
af936a16 | 10 | #include <linux/node.h> |
542d1c88 | 11 | |
1da177e4 LT |
12 | #include <asm/atomic.h> |
13 | #include <asm/page.h> | |
14 | ||
8bc719d3 MS |
15 | struct notifier_block; |
16 | ||
ab954160 AM |
17 | struct bio; |
18 | ||
1da177e4 LT |
19 | #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ |
20 | #define SWAP_FLAG_PRIO_MASK 0x7fff | |
21 | #define SWAP_FLAG_PRIO_SHIFT 0 | |
22 | ||
23 | static inline int current_is_kswapd(void) | |
24 | { | |
25 | return current->flags & PF_KSWAPD; | |
26 | } | |
27 | ||
28 | /* | |
29 | * MAX_SWAPFILES defines the maximum number of swaptypes: things which can | |
30 | * be swapped to. The swap type and the offset into that swap type are | |
31 | * encoded into pte's and into pgoff_t's in the swapcache. Using five bits | |
32 | * for the type means that the maximum number of swapcache pages is 27 bits | |
33 | * on 32-bit-pgoff_t architectures. And that assumes that the architecture packs | |
34 | * the type/offset into the pte as 5/27 as well. | |
35 | */ | |
36 | #define MAX_SWAPFILES_SHIFT 5 | |
0697212a | 37 | #ifndef CONFIG_MIGRATION |
1da177e4 | 38 | #define MAX_SWAPFILES (1 << MAX_SWAPFILES_SHIFT) |
0697212a CL |
39 | #else |
40 | /* Use last two entries for page migration swap entries */ | |
41 | #define MAX_SWAPFILES ((1 << MAX_SWAPFILES_SHIFT)-2) | |
42 | #define SWP_MIGRATION_READ MAX_SWAPFILES | |
43 | #define SWP_MIGRATION_WRITE (MAX_SWAPFILES + 1) | |
44 | #endif | |
1da177e4 LT |
45 | |
46 | /* | |
47 | * Magic header for a swap area. The first part of the union is | |
48 | * what the swap magic looks like for the old (limited to 128MB) | |
49 | * swap area format, the second part of the union adds - in the | |
50 | * old reserved area - some extra information. Note that the first | |
51 | * kilobyte is reserved for boot loader or disk label stuff... | |
52 | * | |
53 | * Having the magic at the end of the PAGE_SIZE makes detecting swap | |
54 | * areas somewhat tricky on machines that support multiple page sizes. | |
55 | * For 2.5 we'll probably want to move the magic to just beyond the | |
56 | * bootbits... | |
57 | */ | |
58 | union swap_header { | |
59 | struct { | |
60 | char reserved[PAGE_SIZE - 10]; | |
61 | char magic[10]; /* SWAP-SPACE or SWAPSPACE2 */ | |
62 | } magic; | |
63 | struct { | |
e8f03d02 AD |
64 | char bootbits[1024]; /* Space for disklabel etc. */ |
65 | __u32 version; | |
66 | __u32 last_page; | |
67 | __u32 nr_badpages; | |
68 | unsigned char sws_uuid[16]; | |
69 | unsigned char sws_volume[16]; | |
70 | __u32 padding[117]; | |
71 | __u32 badpages[1]; | |
1da177e4 LT |
72 | } info; |
73 | }; | |
74 | ||
75 | /* A swap entry has to fit into a "unsigned long", as | |
76 | * the entry is hidden in the "index" field of the | |
77 | * swapper address space. | |
78 | */ | |
79 | typedef struct { | |
80 | unsigned long val; | |
81 | } swp_entry_t; | |
82 | ||
83 | /* | |
84 | * current->reclaim_state points to one of these when a task is running | |
85 | * memory reclaim | |
86 | */ | |
87 | struct reclaim_state { | |
88 | unsigned long reclaimed_slab; | |
89 | }; | |
90 | ||
91 | #ifdef __KERNEL__ | |
92 | ||
93 | struct address_space; | |
94 | struct sysinfo; | |
95 | struct writeback_control; | |
96 | struct zone; | |
97 | ||
98 | /* | |
99 | * A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of | |
100 | * disk blocks. A list of swap extents maps the entire swapfile. (Where the | |
101 | * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart | |
102 | from setup, they're handled identically.) | |
103 | * | |
104 | * We always assume that blocks are of size PAGE_SIZE. | |
105 | */ | |
106 | struct swap_extent { | |
107 | struct list_head list; | |
108 | pgoff_t start_page; | |
109 | pgoff_t nr_pages; | |
110 | sector_t start_block; | |
111 | }; | |
112 | ||
113 | /* | |
114 | * Max bad pages in the new format.. | |
115 | */ | |
116 | #define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x) | |
117 | #define MAX_SWAP_BADPAGES \ | |
118 | ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int)) | |
119 | ||
120 | enum { | |
121 | SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ | |
122 | SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ | |
6a6ba831 | 123 | SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ |
7992fde7 | 124 | SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ |
20137a49 | 125 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ |
52b7efdb HD |
126 | /* add others here before... */ |
127 | SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ | |
1da177e4 LT |
128 | }; |
129 | ||
130 | #define SWAP_CLUSTER_MAX 32 | |
131 | ||
132 | #define SWAP_MAP_MAX 0x7fff | |
133 | #define SWAP_MAP_BAD 0x8000 | |
134 | ||
135 | /* | |
136 | * The in-memory structure used to track swap areas. | |
1da177e4 LT |
137 | */ |
138 | struct swap_info_struct { | |
ebebbbe9 | 139 | unsigned long flags; |
5d337b91 | 140 | int prio; /* swap priority */ |
ebebbbe9 | 141 | int next; /* next entry on swap list */ |
1da177e4 LT |
142 | struct file *swap_file; |
143 | struct block_device *bdev; | |
144 | struct list_head extent_list; | |
1da177e4 | 145 | struct swap_extent *curr_swap_extent; |
ebebbbe9 | 146 | unsigned short *swap_map; |
1da177e4 LT |
147 | unsigned int lowest_bit; |
148 | unsigned int highest_bit; | |
7992fde7 HD |
149 | unsigned int lowest_alloc; /* while preparing discard cluster */ |
150 | unsigned int highest_alloc; /* while preparing discard cluster */ | |
1da177e4 LT |
151 | unsigned int cluster_next; |
152 | unsigned int cluster_nr; | |
6eb396dc HD |
153 | unsigned int pages; |
154 | unsigned int max; | |
155 | unsigned int inuse_pages; | |
ebebbbe9 | 156 | unsigned int old_block_size; |
1da177e4 LT |
157 | }; |
158 | ||
159 | struct swap_list_t { | |
160 | int head; /* head of priority-ordered swapfile list */ | |
161 | int next; /* swapfile to be used next */ | |
162 | }; | |
163 | ||
164 | /* Swap 50% full? Release swapcache more aggressively.. */ | |
165 | #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages) | |
166 | ||
1da177e4 LT |
167 | /* linux/mm/page_alloc.c */ |
168 | extern unsigned long totalram_pages; | |
cb45b0e9 | 169 | extern unsigned long totalreserve_pages; |
1da177e4 LT |
170 | extern unsigned int nr_free_buffer_pages(void); |
171 | extern unsigned int nr_free_pagecache_pages(void); | |
172 | ||
96177299 CL |
173 | /* Definition of global_page_state not available yet */ |
174 | #define nr_free_pages() global_page_state(NR_FREE_PAGES) | |
175 | ||
176 | ||
1da177e4 | 177 | /* linux/mm/swap.c */ |
f04e9ebb KM |
178 | extern void __lru_cache_add(struct page *, enum lru_list lru); |
179 | extern void lru_cache_add_lru(struct page *, enum lru_list lru); | |
b3c97528 HH |
180 | extern void activate_page(struct page *); |
181 | extern void mark_page_accessed(struct page *); | |
1da177e4 | 182 | extern void lru_add_drain(void); |
053837fc | 183 | extern int lru_add_drain_all(void); |
ac6aadb2 | 184 | extern void rotate_reclaimable_page(struct page *page); |
1da177e4 LT |
185 | extern void swap_setup(void); |
186 | ||
894bc310 LS |
187 | extern void add_page_to_unevictable_list(struct page *page); |
188 | ||
f04e9ebb KM |
189 | /** |
190 | * lru_cache_add: add a page to the page lists | |
191 | * @page: the page to add | |
192 | */ | |
4f98a2fe | 193 | static inline void lru_cache_add_anon(struct page *page) |
f04e9ebb | 194 | { |
4f98a2fe | 195 | __lru_cache_add(page, LRU_INACTIVE_ANON); |
f04e9ebb KM |
196 | } |
197 | ||
4f98a2fe | 198 | static inline void lru_cache_add_active_anon(struct page *page) |
f04e9ebb | 199 | { |
4f98a2fe RR |
200 | __lru_cache_add(page, LRU_ACTIVE_ANON); |
201 | } | |
202 | ||
203 | static inline void lru_cache_add_file(struct page *page) | |
204 | { | |
205 | __lru_cache_add(page, LRU_INACTIVE_FILE); | |
206 | } | |
207 | ||
208 | static inline void lru_cache_add_active_file(struct page *page) | |
209 | { | |
210 | __lru_cache_add(page, LRU_ACTIVE_FILE); | |
f04e9ebb KM |
211 | } |
212 | ||
1da177e4 | 213 | /* linux/mm/vmscan.c */ |
dac1d27b | 214 | extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, |
5ad333eb | 215 | gfp_t gfp_mask); |
e1a1cd59 | 216 | extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, |
8c7c6e34 | 217 | gfp_t gfp_mask, bool noswap); |
4f98a2fe | 218 | extern int __isolate_lru_page(struct page *page, int mode, int file); |
69e05944 | 219 | extern unsigned long shrink_all_memory(unsigned long nr_pages); |
1da177e4 | 220 | extern int vm_swappiness; |
b20a3503 | 221 | extern int remove_mapping(struct address_space *mapping, struct page *page); |
bd1e22b8 | 222 | extern long vm_total_pages; |
b20a3503 | 223 | |
9eeff239 CL |
224 | #ifdef CONFIG_NUMA |
225 | extern int zone_reclaim_mode; | |
9614634f | 226 | extern int sysctl_min_unmapped_ratio; |
0ff38490 | 227 | extern int sysctl_min_slab_ratio; |
9eeff239 CL |
228 | extern int zone_reclaim(struct zone *, gfp_t, unsigned int); |
229 | #else | |
230 | #define zone_reclaim_mode 0 | |
231 | static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) | |
232 | { | |
233 | return 0; | |
234 | } | |
235 | #endif | |
236 | ||
894bc310 LS |
237 | #ifdef CONFIG_UNEVICTABLE_LRU |
238 | extern int page_evictable(struct page *page, struct vm_area_struct *vma); | |
89e004ea | 239 | extern void scan_mapping_unevictable_pages(struct address_space *); |
af936a16 LS |
240 | |
241 | extern unsigned long scan_unevictable_pages; | |
242 | extern int scan_unevictable_handler(struct ctl_table *, int, struct file *, | |
243 | void __user *, size_t *, loff_t *); | |
244 | extern int scan_unevictable_register_node(struct node *node); | |
245 | extern void scan_unevictable_unregister_node(struct node *node); | |
894bc310 LS |
246 | #else |
247 | static inline int page_evictable(struct page *page, | |
248 | struct vm_area_struct *vma) | |
249 | { | |
250 | return 1; | |
251 | } | |
af936a16 | 252 | |
89e004ea LS |
253 | static inline void scan_mapping_unevictable_pages(struct address_space *mapping) |
254 | { | |
255 | } | |
af936a16 LS |
256 | |
257 | static inline int scan_unevictable_register_node(struct node *node) | |
258 | { | |
259 | return 0; | |
260 | } | |
261 | ||
262 | static inline void scan_unevictable_unregister_node(struct node *node) { } | |
894bc310 LS |
263 | #endif |
264 | ||
3218ae14 YG |
265 | extern int kswapd_run(int nid); |
266 | ||
1da177e4 LT |
267 | #ifdef CONFIG_MMU |
268 | /* linux/mm/shmem.c */ | |
269 | extern int shmem_unuse(swp_entry_t entry, struct page *page); | |
270 | #endif /* CONFIG_MMU */ | |
271 | ||
272 | extern void swap_unplug_io_fn(struct backing_dev_info *, struct page *); | |
273 | ||
274 | #ifdef CONFIG_SWAP | |
275 | /* linux/mm/page_io.c */ | |
276 | extern int swap_readpage(struct file *, struct page *); | |
277 | extern int swap_writepage(struct page *page, struct writeback_control *wbc); | |
6712ecf8 | 278 | extern void end_swap_bio_read(struct bio *bio, int err); |
1da177e4 LT |
279 | |
280 | /* linux/mm/swap_state.c */ | |
281 | extern struct address_space swapper_space; | |
282 | #define total_swapcache_pages swapper_space.nrpages | |
283 | extern void show_swap_cache_info(void); | |
ac47b003 | 284 | extern int add_to_swap(struct page *); |
73b1262f | 285 | extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t); |
1da177e4 LT |
286 | extern void __delete_from_swap_cache(struct page *); |
287 | extern void delete_from_swap_cache(struct page *); | |
1da177e4 LT |
288 | extern void free_page_and_swap_cache(struct page *); |
289 | extern void free_pages_and_swap_cache(struct page **, int); | |
46017e95 | 290 | extern struct page *lookup_swap_cache(swp_entry_t); |
02098fea | 291 | extern struct page *read_swap_cache_async(swp_entry_t, gfp_t, |
46017e95 | 292 | struct vm_area_struct *vma, unsigned long addr); |
02098fea | 293 | extern struct page *swapin_readahead(swp_entry_t, gfp_t, |
46017e95 HD |
294 | struct vm_area_struct *vma, unsigned long addr); |
295 | ||
1da177e4 | 296 | /* linux/mm/swapfile.c */ |
b962716b | 297 | extern long nr_swap_pages; |
1da177e4 | 298 | extern long total_swap_pages; |
1da177e4 LT |
299 | extern void si_swapinfo(struct sysinfo *); |
300 | extern swp_entry_t get_swap_page(void); | |
f577eb30 | 301 | extern swp_entry_t get_swap_page_of_type(int); |
1da177e4 LT |
302 | extern int swap_duplicate(swp_entry_t); |
303 | extern int valid_swaphandles(swp_entry_t, unsigned long *); | |
304 | extern void swap_free(swp_entry_t); | |
2509ef26 | 305 | extern int free_swap_and_cache(swp_entry_t); |
7bf23687 | 306 | extern int swap_type_of(dev_t, sector_t, struct block_device **); |
f577eb30 | 307 | extern unsigned int count_swap_pages(int, int); |
1da177e4 | 308 | extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); |
3aef83e0 | 309 | extern sector_t swapdev_block(int, pgoff_t); |
1da177e4 | 310 | extern struct swap_info_struct *get_swap_info_struct(unsigned); |
7b1fe597 | 311 | extern int reuse_swap_page(struct page *); |
a2c43eed | 312 | extern int try_to_free_swap(struct page *); |
1da177e4 LT |
313 | struct backing_dev_info; |
314 | ||
1da177e4 LT |
315 | /* linux/mm/thrash.c */ |
316 | extern struct mm_struct * swap_token_mm; | |
1da177e4 LT |
317 | extern void grab_swap_token(void); |
318 | extern void __put_swap_token(struct mm_struct *); | |
319 | ||
320 | static inline int has_swap_token(struct mm_struct *mm) | |
321 | { | |
322 | return (mm == swap_token_mm); | |
323 | } | |
324 | ||
325 | static inline void put_swap_token(struct mm_struct *mm) | |
326 | { | |
327 | if (has_swap_token(mm)) | |
328 | __put_swap_token(mm); | |
329 | } | |
330 | ||
f7b7fd8f RR |
331 | static inline void disable_swap_token(void) |
332 | { | |
333 | put_swap_token(swap_token_mm); | |
334 | } | |
335 | ||
d13d1443 KH |
336 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
337 | extern int mem_cgroup_cache_charge_swapin(struct page *page, | |
338 | struct mm_struct *mm, gfp_t mask, bool locked); | |
8c7c6e34 | 339 | extern void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent); |
d13d1443 KH |
340 | #else |
341 | static inline | |
342 | int mem_cgroup_cache_charge_swapin(struct page *page, | |
343 | struct mm_struct *mm, gfp_t mask, bool locked) | |
344 | { | |
345 | return 0; | |
346 | } | |
8c7c6e34 KH |
347 | static inline void |
348 | mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) | |
349 | { | |
350 | } | |
351 | #endif | |
352 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | |
353 | extern void mem_cgroup_uncharge_swap(swp_entry_t ent); | |
354 | #else | |
355 | static inline void mem_cgroup_uncharge_swap(swp_entry_t ent) | |
d13d1443 KH |
356 | { |
357 | } | |
358 | #endif | |
359 | ||
1da177e4 LT |
360 | #else /* CONFIG_SWAP */ |
361 | ||
b962716b HD |
362 | #define nr_swap_pages 0L |
363 | #define total_swap_pages 0L | |
1da177e4 LT |
364 | #define total_swapcache_pages 0UL |
365 | ||
366 | #define si_swapinfo(val) \ | |
367 | do { (val)->freeswap = (val)->totalswap = 0; } while (0) | |
9ae5b3c7 OH |
368 | /* only sparc cannot include linux/pagemap.h in this file |
369 | * so leave page_cache_release and release_pages undeclared... */ | |
1da177e4 LT |
370 | #define free_page_and_swap_cache(page) \ |
371 | page_cache_release(page) | |
372 | #define free_pages_and_swap_cache(pages, nr) \ | |
373 | release_pages((pages), (nr), 0); | |
374 | ||
bd96b9eb CK |
375 | static inline void show_swap_cache_info(void) |
376 | { | |
377 | } | |
378 | ||
2509ef26 HD |
379 | #define free_swap_and_cache(swp) is_migration_entry(swp) |
380 | #define swap_duplicate(swp) is_migration_entry(swp) | |
bd96b9eb CK |
381 | |
382 | static inline void swap_free(swp_entry_t swp) | |
383 | { | |
384 | } | |
385 | ||
02098fea | 386 | static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, |
bd96b9eb CK |
387 | struct vm_area_struct *vma, unsigned long addr) |
388 | { | |
389 | return NULL; | |
390 | } | |
391 | ||
392 | static inline struct page *lookup_swap_cache(swp_entry_t swp) | |
393 | { | |
394 | return NULL; | |
395 | } | |
396 | ||
60371d97 HD |
397 | static inline int add_to_swap(struct page *page) |
398 | { | |
399 | return 0; | |
400 | } | |
401 | ||
73b1262f HD |
402 | static inline int add_to_swap_cache(struct page *page, swp_entry_t entry, |
403 | gfp_t gfp_mask) | |
bd96b9eb | 404 | { |
73b1262f | 405 | return -1; |
bd96b9eb CK |
406 | } |
407 | ||
408 | static inline void __delete_from_swap_cache(struct page *page) | |
409 | { | |
410 | } | |
411 | ||
412 | static inline void delete_from_swap_cache(struct page *page) | |
413 | { | |
414 | } | |
415 | ||
7b1fe597 | 416 | #define reuse_swap_page(page) (page_mapcount(page) == 1) |
1da177e4 | 417 | |
a2c43eed | 418 | static inline int try_to_free_swap(struct page *page) |
68a22394 RR |
419 | { |
420 | return 0; | |
421 | } | |
422 | ||
1da177e4 LT |
423 | static inline swp_entry_t get_swap_page(void) |
424 | { | |
425 | swp_entry_t entry; | |
426 | entry.val = 0; | |
427 | return entry; | |
428 | } | |
429 | ||
430 | /* linux/mm/thrash.c */ | |
431 | #define put_swap_token(x) do { } while(0) | |
432 | #define grab_swap_token() do { } while(0) | |
433 | #define has_swap_token(x) 0 | |
f7b7fd8f | 434 | #define disable_swap_token() do { } while(0) |
1da177e4 | 435 | |
d13d1443 KH |
436 | static inline int mem_cgroup_cache_charge_swapin(struct page *page, |
437 | struct mm_struct *mm, gfp_t mask, bool locked) | |
438 | { | |
439 | return 0; | |
440 | } | |
441 | ||
1da177e4 LT |
442 | #endif /* CONFIG_SWAP */ |
443 | #endif /* __KERNEL__*/ | |
444 | #endif /* _LINUX_SWAP_H */ |