/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c.
 * These two lists must be kept in accord with each other.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG
/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg
 * doesn't allocate memory itself but reclaims memory from all available
 * zones, so the "where I want memory from" bits of gfp_mask have no
 * meaning. Any bits of that field would therefore do, but having a rule
 * avoids ambiguity: charge functions' gfp_mask should be set to GFP_KERNEL
 * or to (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does its own memory allocation in the future,
 * GFP_KERNEL is sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);

/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

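/*
 * Sketch of the two-phase swap-in charge protocol (hypothetical caller in
 * a swap-in fault path; the names of the surrounding steps are
 * illustrative, not real kernel symbols):
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		goto oom;
 *	if (mapping_the_page_failed)
 *		mem_cgroup_cancel_charge_swapin(memcg);
 *	else
 *		mem_cgroup_commit_charge_swapin(page, memcg);
 */
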
extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);

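/*
 * Example of the gfp_mask rule above (sketch of a hypothetical page-cache
 * insertion path):
 *
 *	error = mem_cgroup_cache_charge(page, current->mm,
 *					gfp_mask & GFP_RECLAIM_MASK);
 *	if (error)
 *		return error;
 */
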
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For coalescing uncharge, for reducing memcg's overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

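/*
 * Sketch of coalesced uncharging (hypothetical caller freeing a batch of
 * pages; the start/end pair lets memcg batch the counter updates):
 *
 *	mem_cgroup_uncharge_start();
 *	list_for_each_entry(page, &pages_to_free, lru)
 *		mem_cgroup_uncharge_page(page);
 *	mem_cgroup_uncharge_end();
 */
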
bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
bool task_in_mem_cgroup(struct task_struct *task,
			const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);

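/*
 * Migration charge handling is a prepare/end pair bracketing the copy
 * (sketch; move_the_page_contents() and the "rc == 0" success test are
 * placeholders for whatever the caller actually does):
 *
 *	struct mem_cgroup *memcg;
 *
 *	mem_cgroup_prepare_migration(page, newpage, &memcg);
 *	rc = move_the_page_contents(newpage, page);
 *	mem_cgroup_end_migration(memcg, page, newpage, rc == 0);
 */
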
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);

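/*
 * mem_cgroup_iter() walks the hierarchy below its root argument; feed the
 * previous return value back in to continue, and use mem_cgroup_iter_break()
 * to stop a walk early. A full walk looks like this (sketch, without a
 * reclaim cookie; visit() stands in for the per-group work):
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter != NULL;
 *	     iter = mem_cgroup_iter(root, iter, NULL))
 *		visit(iter);
 */
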
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

/**
 * mem_cgroup_toggle_oom - toggle the memcg OOM killer for the current task
 * @new: true to enable, false to disable
 *
 * Toggle whether a failed memcg charge should invoke the OOM killer
 * or just return -ENOMEM.  Returns the previous toggle state.
 *
 * NOTE: Any path that enables the OOM killer before charging must
 *       call mem_cgroup_oom_synchronize() afterward to finalize the
 *       OOM handling and clean up.
 */
static inline bool mem_cgroup_toggle_oom(bool new)
{
	bool old;

	old = current->memcg_oom.may_oom;
	current->memcg_oom.may_oom = new;

	return old;
}

static inline void mem_cgroup_enable_oom(void)
{
	bool old = mem_cgroup_toggle_oom(true);

	WARN_ON(old == true);
}

static inline void mem_cgroup_disable_oom(void)
{
	bool old = mem_cgroup_toggle_oom(false);

	WARN_ON(old == false);
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_oom.in_memcg_oom;
}

bool mem_cgroup_oom_synchronize(void);

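/*
 * Sketch of the toggle pattern around a charge attempt (charge_something()
 * is a placeholder; the NOTE above mem_cgroup_toggle_oom() is the
 * authoritative rule):
 *
 *	mem_cgroup_enable_oom();
 *	ret = charge_something(mm, GFP_KERNEL);
 *	mem_cgroup_disable_oom();
 *	if (ret && task_in_memcg_oom(current))
 *		mem_cgroup_oom_synchronize();
 */
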
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_stat_index idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}

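/*
 * The begin/end pair above stabilizes the page's memcg binding while a
 * stat is updated. Sketch of the expected pairing (illustrative caller
 * accounting a file-mapped page):
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */
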
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_count_vm_event(mm, idx);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif

#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline bool mem_cgroup_toggle_oom(bool new)
{
	return false;
}

static inline void mem_cgroup_enable_oom(void)
{
}

static inline void mem_cgroup_disable_oom(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(void)
{
	return false;
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * The slab_mutex must be held when looping through those caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)

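/*
 * Example walk (sketch; assumes a helper such as cache_from_memcg_idx()
 * to map an index to the per-memcg child cache, and that slab_mutex is
 * held as required above):
 *
 *	int i;
 *	struct kmem_cache *c;
 *
 *	for_each_memcg_cache_index(i) {
 *		c = cache_from_memcg_idx(root_cache, i);
 *		if (!c)
 *			continue;
 *		...
 *	}
 */
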
static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we'll do everything in our power to not incur any overhead
 * for non-memcg users of the kmem functions: not even a function call, if
 * we can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly. If it is on,
 * we'll still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */

bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
					int order);
void __memcg_kmem_commit_charge(struct page *page,
				       struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);
int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
			 struct kmem_cache *root_cache);
void memcg_release_cache(struct kmem_cache *cachep);
void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
void kmem_cache_destroy_memcg_children(struct kmem_cache *s);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * Returns true if the memcg where the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	if (!memcg_kmem_enabled())
		return true;

	/*
	 * __GFP_NOFAIL allocations will move on even if charging is not
	 * possible. Therefore we don't even try, and have this allocation
	 * unaccounted. We could in theory charge it with
	 * res_counter_charge_nofail, but we hope those allocations are rare,
	 * and won't be worth the trouble.
	 */
	if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
		return true;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;

	/* If the task is dying, just let it go. */
	if (unlikely(fatal_signal_pending(current)))
		return true;

	return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify the memcg here, since it is embedded in the
 * page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit the memcg given by @memcg to the
 * corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
	if (memcg_kmem_enabled() && memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
}

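/*
 * Together, the three helpers above form the kmem page charge protocol.
 * Sketch of an allocator-side caller (simplified; modeled loosely on how a
 * page allocator wrapper would use them):
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 *	return page;
 *
 * memcg_kmem_commit_charge() reverts the charge when page is NULL, and
 * memcg_kmem_uncharge_pages() is the counterpart on the free path.
 */
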
/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * This function assumes that the task allocating, which determines the memcg
 * in the page allocator, belongs to the same cgroup throughout the whole
 * process. Misaccounting can happen if the task calls memcg_kmem_get_cache()
 * while belonging to a cgroup, and later on changes. This is considered
 * acceptable, and should only happen upon task migration.
 *
 * Before the cache is created by the memcg core, there is also a possible
 * imbalance: the task belongs to a memcg, but the cache being allocated from
 * is the global cache, since the child cache is not yet guaranteed to be
 * ready. This case is also fine, since in this case the GFP_KMEMCG will not be
 * passed and the page allocator will not attempt any cgroup accounting.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (!memcg_kmem_enabled())
		return cachep;
	if (gfp & __GFP_NOFAIL)
		return cachep;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return cachep;
	if (unlikely(fatal_signal_pending(current)))
		return cachep;

	return __memcg_kmem_get_cache(cachep, gfp);
}

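/*
 * Sketch of how an allocation entry point is expected to use this helper
 * (simplified; some_cache_alloc() and allocate_from() are placeholders for
 * the slab allocator's real entry points):
 *
 *	void *some_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 *	{
 *		cachep = memcg_kmem_get_cache(cachep, flags);
 *		return allocate_from(cachep, flags);
 *	}
 */
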
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline int
memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
		     struct kmem_cache *root_cache)
{
	return 0;
}

static inline void memcg_release_cache(struct kmem_cache *cachep)
{
}

static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
					struct kmem_cache *s)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;
}

static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */