/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names array is defined in mm/memcontrol.c;
 * these two lists must be kept in sync with each other.
 */
enum mem_cgroup_stat_index {
        /*
         * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
         */
        MEM_CGROUP_STAT_CACHE,          /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,            /* # of pages charged as anon rss */
        MEM_CGROUP_STAT_RSS_HUGE,       /* # of pages charged as anon huge */
        MEM_CGROUP_STAT_FILE_MAPPED,    /* # of pages charged as file rss */
        MEM_CGROUP_STAT_WRITEBACK,      /* # of pages under writeback */
        MEM_CGROUP_STAT_SWAP,           /* # of pages, swapped out */
        MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_reclaim_cookie {
        struct zone *zone;
        int priority;
        unsigned int generation;
};

#ifdef CONFIG_MEMCG
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                          gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
                              bool lrucare);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
                        bool lrucare);
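
/*
 * Illustrative sketch only (not part of this header): charging follows a
 * try/commit/cancel protocol.  A fault handler would first try to charge
 * the new page, commit once the page has been safely mapped, or cancel if
 * the mapping step fails.  The surrounding fault logic below is a
 * simplified assumption, not code from this file:
 *
 *      struct mem_cgroup *memcg;
 *
 *      if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
 *              return VM_FAULT_OOM;
 *      if (map_the_page(page)) {               (assumed helper)
 *              mem_cgroup_cancel_charge(page, memcg);
 *              return VM_FAULT_ERROR;
 *      }
 *      mem_cgroup_commit_charge(page, memcg, false);
 */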

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
                                  struct mem_cgroup *memcg);
bool task_in_mem_cgroup(struct task_struct *task,
                        const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
        rcu_read_unlock();
        return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
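
/*
 * Illustrative iteration pattern (a sketch of an assumed reclaim-style
 * caller, not code from this file): mem_cgroup_iter() walks the hierarchy
 * below @root one memcg at a time, and mem_cgroup_iter_break() releases
 * the iterator when the walk stops early:
 *
 *      struct mem_cgroup_reclaim_cookie reclaim = {
 *              .zone = zone,
 *              .priority = priority,
 *      };
 *      struct mem_cgroup *memcg;
 *
 *      memcg = mem_cgroup_iter(root, NULL, &reclaim);
 *      do {
 *              scan_one_memcg(memcg);          (assumed helper)
 *              if (done) {
 *                      mem_cgroup_iter_break(root, memcg);
 *                      break;
 *              }
 *              memcg = mem_cgroup_iter(root, memcg, &reclaim);
 *      } while (memcg);
 */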

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                      struct task_struct *p);

static inline void mem_cgroup_oom_enable(void)
{
        WARN_ON(current->memcg_oom.may_oom);
        current->memcg_oom.may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
        WARN_ON(!current->memcg_oom.may_oom);
        current->memcg_oom.may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_oom.memcg;
}

bool mem_cgroup_oom_synchronize(bool wait);
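
/*
 * Sketch of the intended call pattern (the fault-path caller below is
 * assumed, not defined here): user-space page faults bracket the fault
 * with oom_enable()/oom_disable() so that a memcg OOM situation is only
 * recorded during the fault and then handled from a context that holds
 * no locks:
 *
 *      mem_cgroup_oom_enable();
 *      ret = handle_mm_fault(mm, vma, address, flags);
 *      mem_cgroup_oom_disable();
 *
 *      if ((ret & VM_FAULT_OOM) && task_in_memcg_oom(current))
 *              mem_cgroup_oom_synchronize(true);
 */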

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
        if (memory_cgrp_subsys.disabled)
                return true;
        return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
                                         unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        rcu_read_lock();
        *locked = false;
        if (atomic_read(&memcg_moving))
                __mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
                                       unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        if (*locked)
                __mem_cgroup_end_update_page_stat(page, flags);
        rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_stat_index idx,
                                 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
        mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
        mem_cgroup_update_page_stat(page, idx, -1);
}
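
/*
 * Illustrative update sequence (the caller shown is assumed, e.g. rmap or
 * writeback accounting code, and is not part of this header): per-memcg
 * page state counters are updated inside a begin/end pair so the update
 * cannot race with the page moving between memcgs:
 *
 *      bool locked;
 *      unsigned long flags;
 *
 *      mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *      if (page_newly_mapped)                  (assumed condition)
 *              mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 *      mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */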

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
                                             enum vm_event_item idx)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask,
                                        struct mem_cgroup **memcgp)
{
        *memcgp = NULL;
        return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
                                            struct mem_cgroup *memcg,
                                            bool lrucare)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
                                            struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *oldpage,
                                      struct page *newpage,
                                      bool lrucare)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
                                                    struct mem_cgroup *memcg)
{
        return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
                                                    struct zone *zone)
{
        return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
                                      const struct mem_cgroup *memcg)
{
        return true;
}

static inline struct cgroup_subsys_state
                *mem_cgroup_css(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
        return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
        return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                           int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
        return false;
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
        return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
        UNDER_LIMIT,
        SOFT_LIMIT,
        OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)        \
        for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
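
/*
 * Usage sketch (the cache_from_memcg_idx() lookup below is assumed to be
 * available to the caller, e.g. slab internals; it is not declared here):
 *
 *      int i;
 *
 *      mutex_lock(&slab_mutex);
 *      for_each_memcg_cache_index(i) {
 *              struct kmem_cache *c = cache_from_memcg_idx(s, i);
 *              if (!c)
 *                      continue;
 *              ...operate on the per-memcg cache...
 *      }
 *      mutex_unlock(&slab_mutex);
 */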

static inline bool memcg_kmem_enabled(void)
{
        return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we'll do everything in our power not to incur any overhead
 * for non-memcg users of the kmem functions. Not even a function call, if we
 * can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly. If it is on,
 * we'll still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
                                 int order);
void __memcg_kmem_commit_charge(struct page *page,
                                struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);

void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);

int __memcg_cleanup_cache_params(struct kmem_cache *s);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * returns true if the memcg to which the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        if (!memcg_kmem_enabled())
                return true;

        /*
         * __GFP_NOFAIL allocations will move on even if charging is not
         * possible. Therefore we don't even try, and have this allocation
         * unaccounted. We could in theory charge it with
         * res_counter_charge_nofail, but we hope those allocations are rare,
         * and won't be worth the trouble.
         */
        if (gfp & __GFP_NOFAIL)
                return true;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return true;

        /* If the task is dying, just let it go. */
        if (unlikely(fatal_signal_pending(current)))
                return true;

        return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify memcg here, since it is embedded in page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit the memcg given by @memcg to the
 * corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
        if (memcg_kmem_enabled() && memcg)
                __memcg_kmem_commit_charge(page, memcg, order);
}
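
/*
 * Illustrative allocation-path sketch (the page-allocator caller below is
 * assumed, not part of this header): a kmem page allocation first asks
 * whether the charge is allowed, performs the allocation, then commits
 * the charge to the resulting page (commit with a NULL page reverts the
 * charge when the allocation failed).  The page is uncharged again on the
 * free path:
 *
 *      struct mem_cgroup *memcg = NULL;
 *      struct page *page;
 *
 *      if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *              return NULL;
 *      page = alloc_pages(gfp, order);
 *      memcg_kmem_commit_charge(page, memcg, order);
 *      ...
 *      memcg_kmem_uncharge_pages(page, order);         (on free)
 */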

/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * All memory allocated from a per-memcg cache is charged to the owner memcg.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
        if (!memcg_kmem_enabled())
                return cachep;
        if (gfp & __GFP_NOFAIL)
                return cachep;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return cachep;
        if (unlikely(fatal_signal_pending(current)))
                return cachep;

        return __memcg_kmem_get_cache(cachep, gfp);
}
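
/*
 * Usage sketch (a simplified, assumed slab fast path, not code from this
 * file): before carving an object out of a cache, the allocator substitutes
 * the per-memcg clone of the cache for the current task, so the object is
 * charged to the right memcg:
 *
 *      cachep = memcg_kmem_get_cache(cachep, flags);
 *      object = allocate_from(cachep, flags);          (assumed helper)
 */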
#else
#define for_each_memcg_cache_index(_idx)        \
        for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
        return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
        return cachep;
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */