/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions taking a gfp_mask should be called with either
 * GFP_KERNEL or (gfp_mask & GFP_RECLAIM_MASK). In the current
 * implementation, memcg does not allocate memory itself but reclaims
 * memory from all available zones, so the "where I want memory from"
 * placement bits of gfp_mask have no meaning. Any bits of that field
 * would therefore work, but having a rule avoids ambiguity: a charge
 * function's gfp_mask should be GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK.
 * (Of course, should memcg ever allocate memory itself, GFP_KERNEL is sane.)
 */
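
/*
 * A caller-side sketch of the rule above; the surrounding fault-handling
 * flow and the "oom" label are illustrative, not part of this API:
 *
 *	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 *		goto oom;
 */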

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
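
/*
 * The three swap-in functions above form a try/commit/cancel protocol:
 * the charge is reserved before the page is mapped, then either kept or
 * backed out depending on whether the fault completes. A sketch of the
 * calling sequence; the surrounding control flow is illustrative:
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto oom;
 *	... map the page ...
 *	if (failed)
 *		mem_cgroup_cancel_charge_swapin(memcg);
 *	else
 *		mem_cgroup_commit_charge_swapin(page, memcg);
 */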

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
				       enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
void mem_cgroup_lru_del(struct page *);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
					 enum lru_list, enum lru_list);

/* For coalescing uncharges to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
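
/*
 * A sketch of the batching these two hooks enable; the page list is
 * illustrative. Uncharges issued between start and end can be coalesced
 * into fewer counter updates instead of one per page:
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry(page, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */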

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order);
bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

/*
 * Returns non-zero if @mm's owner task belongs to @cgroup or to one of
 * its descendants in the hierarchy.
 */
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *memcg;
	int match;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	match = __mem_cgroup_same_or_subtree(cgroup, memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);
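
/*
 * The two migration hooks bracket a page-migration attempt: the charge
 * for @newpage is set up before the copy and settled by end_migration
 * according to whether migration succeeded. A sketch loosely following
 * callers of this era; "rc" and the move step are illustrative:
 *
 *	if (mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL))
 *		return -ENOMEM;
 *	rc = ... move page contents and mappings ...
 *	mem_cgroup_end_migration(memcg, page, newpage, rc == 0);
 */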

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
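
/*
 * mem_cgroup_iter() visits every memcg in the hierarchy below @root:
 * passing the previous return value back in resumes the walk, and NULL
 * marks the end. When a walk is abandoned early, mem_cgroup_iter_break()
 * must be used to release the reference pinning the last visited group.
 * A sketch (the reclaim cookie is omitted for brevity):
 *
 *	for (memcg = mem_cgroup_iter(root, NULL, NULL);
 *	     memcg;
 *	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
 *		if (enough) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *		...
 *	}
 */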

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, int zid,
					   unsigned int lru_mask);
struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				      struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					  struct page *newpage);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				       unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
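
/*
 * Page statistics must be updated inside the begin/end bracket above so
 * that the update cannot race with the page moving between memcgs. A
 * sketch modeled on file-rmap accounting of this era; the _mapcount test
 * stands in for whatever state transition is being accounted:
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (atomic_inc_and_test(&page->_mapcount))
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */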

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
						     struct page *page,
						     enum lru_list lru)
{
	return &zone->lruvec;
}

static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_lru_del(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
						       struct page *page,
						       enum lru_list from,
						       enum lru_list to)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			     unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */