per-zone and reclaim enhancements for memory controller: per-zone-lock for cgroup
[deliverable/linux.git] / include / linux / memcontrol.h
CommitLineData
8cdea7c0
BS
1/* memcontrol.h - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
78fb7466
PE
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
8cdea7c0
BS
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 */
19
20#ifndef _LINUX_MEMCONTROL_H
21#define _LINUX_MEMCONTROL_H
22
3062fc67
DR
23#include <linux/rcupdate.h>
24#include <linux/mm.h>
25
78fb7466
PE
26struct mem_cgroup;
27struct page_cgroup;
8697d331
BS
28struct page;
29struct mm_struct;
78fb7466
PE
30
31#ifdef CONFIG_CGROUP_MEM_CONT
32
33extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p);
34extern void mm_free_cgroup(struct mm_struct *mm);
35extern void page_assign_page_cgroup(struct page *page,
36 struct page_cgroup *pc);
37extern struct page_cgroup *page_get_page_cgroup(struct page *page);
e1a1cd59
BS
38extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
39 gfp_t gfp_mask);
8a9f3ccd 40extern void mem_cgroup_uncharge(struct page_cgroup *pc);
66e1707b
BS
41extern void mem_cgroup_move_lists(struct page_cgroup *pc, bool active);
42extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
43 struct list_head *dst,
44 unsigned long *scanned, int order,
45 int mode, struct zone *z,
46 struct mem_cgroup *mem_cont,
47 int active);
c7ba5c9e 48extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
e1a1cd59
BS
49extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
50 gfp_t gfp_mask);
4c4a2214 51int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
3062fc67
DR
52
53static inline struct mem_cgroup *mm_cgroup(const struct mm_struct *mm)
54{
55 return rcu_dereference(mm->mem_cgroup);
56}
8a9f3ccd
BS
57
/* Uncharge a page: resolve its page_cgroup, then drop the charge on it. */
static inline void mem_cgroup_uncharge_page(struct page *page)
{
	struct page_cgroup *pc = page_get_page_cgroup(page);

	mem_cgroup_uncharge(pc);
}
78fb7466 62
ae41be37
KH
63extern int mem_cgroup_prepare_migration(struct page *page);
64extern void mem_cgroup_end_migration(struct page *page);
65extern void mem_cgroup_page_migration(struct page *page, struct page *newpage);
66
58ae83db
KH
67/*
68 * For memory reclaim.
69 */
70extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
5932f367
KH
71extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);
72
6c48a1d0
KH
73extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
74extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
75 int priority);
76extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
77 int priority);
58ae83db 78
cc38108e
KH
79extern long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
80 struct zone *zone, int priority);
81extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
82 struct zone *zone, int priority);
58ae83db 83
78fb7466
PE
84#else /* CONFIG_CGROUP_MEM_CONT */
/* No-op stub: nothing to initialize when the memory controller is off. */
static inline void mm_init_cgroup(struct mm_struct *mm,
					struct task_struct *p)
{
}
89
/* No-op stub: nothing to release when the memory controller is off. */
static inline void mm_free_cgroup(struct mm_struct *mm)
{
}
93
/* No-op stub: pages carry no page_cgroup in this configuration. */
static inline void page_assign_page_cgroup(struct page *page,
					struct page_cgroup *pc)
{
}
98
99static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
100{
101 return NULL;
102}
103
e1a1cd59
BS
104static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
105 gfp_t gfp_mask)
8a9f3ccd
BS
106{
107 return 0;
108}
109
/* No-op stub: there is no charge to drop. */
static inline void mem_cgroup_uncharge(struct page_cgroup *pc)
{
}
113
/* No-op stub: page uncharge does nothing without the controller. */
static inline void mem_cgroup_uncharge_page(struct page *page)
{
}
117
66e1707b
BS
/* No-op stub: there are no per-cgroup LRU lists to move between. */
static inline void mem_cgroup_move_lists(struct page_cgroup *pc,
					bool active)
{
}
122
8697d331 123static inline int mem_cgroup_cache_charge(struct page *page,
e1a1cd59
BS
124 struct mm_struct *mm,
125 gfp_t gfp_mask)
8697d331
BS
126{
127 return 0;
128}
129
3062fc67 130static inline struct mem_cgroup *mm_cgroup(const struct mm_struct *mm)
bed7161a
BS
131{
132 return NULL;
133}
134
4c4a2214
DR
/* Stub: with no cgroups, every task trivially matches. */
static inline int task_in_mem_cgroup(struct task_struct *task,
					const struct mem_cgroup *mem)
{
	return 1;
}
140
ae41be37
KH
/* Stub: migration preparation always succeeds with no accounting. */
static inline int mem_cgroup_prepare_migration(struct page *page)
{
	return 0;
}
145
/* No-op stub: no migration bookkeeping to finish. */
static inline void mem_cgroup_end_migration(struct page *page)
{
}
149
/* No-op stub: no charge to transfer from @page to @newpage. */
static inline void
mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
}
154
58ae83db
KH
/* Stub for reclaim heuristics: mapped ratio is reported as zero. */
static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	return 0;
}
5932f367
KH
159
/*
 * Stub for reclaim heuristics: no active/inactive imbalance to report.
 *
 * Return type is long so the stub matches the extern prototype used
 * when CONFIG_CGROUP_MEM_CONT is enabled (the stub previously returned
 * int, silently diverging from that declaration).
 */
static inline long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	return 0;
}
164
6c48a1d0
KH
/* Stub: reclaim priority tracking is disabled; report zero. */
static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}
169
/* No-op stub: nothing records reclaim priority in this configuration. */
static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
					int priority)
{
}
174
/* No-op stub: nothing stores reclaim priority in this configuration. */
static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
					int priority)
{
}
179
cc38108e
KH
/* Stub: no per-cgroup active pages to reclaim; report zero. */
static inline long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	return 0;
}
185
/* Stub: no per-cgroup inactive pages to reclaim; report zero. */
static inline long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	return 0;
}
78fb7466
PE
191#endif /* CONFIG_CGROUP_MEM_CONT */
192
8cdea7c0
BS
193#endif /* _LINUX_MEMCONTROL_H */
194
This page took 0.037485 seconds and 5 git commands to generate.