#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif

#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
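
/*
 * For illustration: with CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and
 * CONFIG_HIGHMEM all enabled, FOR_ALL_ZONES(PGALLOC) expands to
 *
 *        PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE
 *
 * i.e. one item per configured zone, in zone index order; a disabled
 * zone drops out through the empty variant of its helper macro above.
 */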

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
        FOR_ALL_ZONES(PGALLOC),
        PGFREE, PGACTIVATE, PGDEACTIVATE,
        PGFAULT, PGMAJFAULT,
        FOR_ALL_ZONES(PGREFILL),
        FOR_ALL_ZONES(PGSTEAL),
        FOR_ALL_ZONES(PGSCAN_KSWAPD),
        FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
        PGSCAN_ZONE_RECLAIM_FAILED,
#endif
        PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
        KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
        KSWAPD_SKIP_CONGESTION_WAIT,
        PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
        COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
        COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
        HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
        UNEVICTABLE_PGCULLED,   /* culled to noreclaim list */
        UNEVICTABLE_PGSCANNED,  /* scanned for reclaimability */
        UNEVICTABLE_PGRESCUED,  /* rescued from noreclaim list */
        UNEVICTABLE_PGMLOCKED,
        UNEVICTABLE_PGMUNLOCKED,
        UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
        UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
        UNEVICTABLE_MLOCKFREED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        THP_FAULT_ALLOC,
        THP_FAULT_FALLBACK,
        THP_COLLAPSE_ALLOC,
        THP_COLLAPSE_ALLOC_FAILED,
        THP_SPLIT,
#endif
        NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
        unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
        __this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
        this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
        __this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
        this_cpu_add(vm_event_states.event[item], delta);
}
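
/*
 * Illustrative usage sketch (hypothetical call sites, not part of this
 * header): the plain variants are safe in any context, while the
 * __-prefixed variants skip preemption protection and must only be
 * used when the caller already cannot be preempted:
 *
 *        count_vm_event(PGFAULT);                  e.g. from a fault handler
 *        __count_vm_events(PGSTEAL_NORMAL, nr);    preemption already disabled
 *                                                  (nr is a hypothetical count)
 */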

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
        __count_vm_events(item##_NORMAL - ZONE_NORMAL + \
        zone_idx(zone), delta)
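
/*
 * For illustration: the macro above relies on the FOR_ALL_ZONES() items
 * sitting in zone index order, so item##_NORMAL - ZONE_NORMAL yields the
 * first per-zone item. On a highmem zone (zone_idx(zone) == ZONE_HIGHMEM),
 *
 *        __count_zone_vm_events(PGALLOC, zone, 1)
 *
 * therefore resolves to __count_vm_events(PGALLOC_HIGH, 1).
 */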

/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
                                enum zone_stat_item item)
{
        atomic_long_add(x, &zone->vm_stat[item]);
        atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
        long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
                                enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}
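
/*
 * For illustration: under SMP the summed counter can transiently read
 * negative, because per-cpu differentials are only folded back into the
 * atomic totals periodically; hence the clamp to zero above. A typical
 * read looks like:
 *
 *        unsigned long free = global_page_state(NR_FREE_PAGES);
 */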

/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization, so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
                                enum zone_stat_item item)
{
        long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
        int cpu;
        for_each_online_cpu(cpu)
                x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

        if (x < 0)
                x = 0;
#endif
        return x;
}
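
/*
 * Illustrative (hypothetical) caller: the snapshot variant suits rare
 * slow paths where accuracy matters more than the cost of the cpu loop,
 * e.g. double-checking free memory when a zone looks short:
 *
 *        unsigned long free = zone_page_state_snapshot(zone, NR_FREE_PAGES);
 */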

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
                                enum zone_stat_item item)
{
        struct zone *zones = NODE_DATA(node)->node_zones;

        return
#ifdef CONFIG_ZONE_DMA
                zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
                zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], item) +
                zone_page_state(&zones[ZONE_MOVABLE], item);
}
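
/*
 * Illustrative usage (hypothetical call site): sum one stat item over
 * every zone of the local node, e.g.
 *
 *        unsigned long file = node_page_state(numa_node_id(), NR_FILE_PAGES);
 */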

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
        memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
                                int (*calculate_pressure)(struct zone *));
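
/*
 * Illustrative sketch (assumed caller): a reclaim daemon such as kswapd
 * can tighten a node's per-cpu stat thresholds while it is reclaiming
 * and restore them when it goes back to sleep, along the lines of
 *
 *        set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
 *        (... reclaim ...)
 *        set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
 */
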
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
                        enum zone_stat_item item, int delta)
{
        zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_inc(&zone->vm_stat[item]);
        atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
        atomic_long_dec(&zone->vm_stat[item]);
        atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
                        enum zone_stat_item item)
{
        __dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) { }

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */