/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
 * allocate memory itself but reclaims memory from all available zones, so the
 * "where do I want memory from" bits of gfp_mask carry no meaning. Any bits of
 * that field would work, but having a rule avoids ambiguity: a charge
 * function's gfp_mask should be set to GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does allocate memory in the future, GFP_KERNEL is
 * sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
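/*
 * Illustrative sketch, not part of the original header: following the
 * gfp_mask rule above, an anonymous-fault path charges a freshly
 * allocated page with GFP_KERNEL and backs out if the charge fails:
 *
 *	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
 *		page_cache_release(page);
 *		return VM_FAULT_OOM;
 *	}
 */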
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
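/*
 * Illustrative sketch, not part of the original header: the swap-in
 * charge is a two-phase protocol. The fault path tries the charge up
 * front, then commits it once the page is mapped, or cancels it if the
 * fault fails in between ("error" stands in for the caller's own
 * failure condition):
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		return VM_FAULT_OOM;
 *	... map the page into the page tables ...
 *	if (error)
 *		mem_cgroup_cancel_charge_swapin(memcg);
 *	else
 *		mem_cgroup_commit_charge_swapin(page, memcg);
 */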

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
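/*
 * Illustrative sketch, not part of the original header: a caller that
 * uncharges many pages in a row (truncation, zapping a range) brackets
 * the loop so the individual uncharges are accumulated and applied in
 * one batch at _end() rather than hitting the counters once per page:
 *
 *	mem_cgroup_uncharge_start();
 *	... for each page: mem_cgroup_uncharge_page(page); ...
 *	mem_cgroup_uncharge_end();
 */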

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order);
bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *memcg;
	int match;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	match = __mem_cgroup_same_or_subtree(cgroup, memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);
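/*
 * Illustrative sketch, not part of the original header: page migration
 * brackets the copy with prepare/end so the charge follows the page to
 * its new location (error handling of prepare, which can fail, is
 * omitted here):
 *
 *	struct mem_cgroup *memcg = NULL;
 *
 *	mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL);
 *	... copy page contents and switch mappings over to newpage ...
 *	mem_cgroup_end_migration(memcg, page, newpage, migration_ok);
 */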

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
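/*
 * Illustrative sketch, not part of the original header: mem_cgroup_iter()
 * walks the hierarchy below the root memcg. Passing the previous return
 * value back in continues the walk; a NULL return ends it. A partial walk
 * must be cancelled with mem_cgroup_iter_break() to drop the reference
 * held on the current position:
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter != NULL;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		... operate on iter; to stop early, call
 *		mem_cgroup_iter_break(root, iter) and break out ...
 *	}
 */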

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
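/*
 * Illustrative sketch, not part of the original header, modeled on how
 * rmap accounts a newly mapped file page: stat updates are bracketed by
 * begin/end above so they cannot race with the page being moved to
 * another memcg:
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (atomic_inc_and_test(&page->_mapcount))
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */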

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
		struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */