memcg: use css_get/put when charging/uncharging kmem
mm/memcontrol.c
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * Memory thresholds
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Kernel Memory Controller
 * Copyright (C) 2012 Parallels Inc. and Google Inc.
 * Authors: Glauber Costa and Suleiman Souhlal
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/page_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys mem_cgroup_subsys __read_mostly;
EXPORT_SYMBOL(mem_cgroup_subsys);

#define MEM_CGROUP_RECLAIM_RETRIES	5
static struct mem_cgroup *root_mem_cgroup __read_mostly;

#ifdef CONFIG_MEMCG_SWAP
/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
int do_swap_account __read_mostly;

/* for remembering the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata = 0;
#endif

#else
#define do_swap_account		0
#endif


/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"swap",
};

enum mem_cgroup_events_index {
	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
	MEM_CGROUP_EVENTS_PGFAULT,	/* # of page-faults */
	MEM_CGROUP_EVENTS_PGMAJFAULT,	/* # of major page-faults */
	MEM_CGROUP_EVENTS_NSTATS,
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used to
 * trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};
#define THRESHOLDS_EVENTS_TARGET 128
#define SOFTLIMIT_EVENTS_TARGET 1024
#define NUMAINFO_EVENTS_TARGET	1024

struct mem_cgroup_stat_cpu {
	long count[MEM_CGROUP_STAT_NSTATS];
	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	/*
	 * last scanned hierarchy member. Valid only if last_dead_count
	 * matches memcg->dead_count of the hierarchy root group.
	 */
	struct mem_cgroup *last_visited;
	unsigned long last_dead_count;

	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * per-zone information in memory controller.
 */
struct mem_cgroup_per_zone {
	struct lruvec		lruvec;
	unsigned long		lru_size[NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long long	usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_zone {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree_per_node {
	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	u64 threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik Van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller. Reclaim will begin when
 * we hit the water mark. May be even add a low water mark, such that
 * no reclaim occurs from a cgroup at its low water mark, this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	union {
		/*
		 * the counter to account for mem+swap usage.
		 */
		struct res_counter memsw;

		/*
		 * rcu_freeing is used only when freeing struct mem_cgroup,
		 * so put it into a union to avoid wasting more memory.
		 * It must be disjoint from the css field.  It could be
		 * in a union with the res field, but res plays a much
		 * larger part in mem_cgroup life than memsw, and might
		 * be of interest, even at time of free, when debugging.
		 * So share rcu_head with the less interesting memsw.
		 */
		struct rcu_head rcu_freeing;
		/*
		 * We also need some space for a worker in deferred freeing.
		 * By the time we call it, rcu_freeing is no longer in use.
		 */
		struct work_struct work_freeing;
	};

	/*
	 * the counter to account for kernel memory usage.
	 */
	struct res_counter kmem;
	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;
	unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */

	bool		oom_lock;
	atomic_t	under_oom;

	atomic_t	refcnt;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* set when res.limit == memsw.limit */
	bool		memsw_is_minimum;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long	move_charge_at_immigrate;
	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t	moving_account;
	/* taken only while moving_account > 0 */
	spinlock_t	move_lock;
	/*
	 * percpu counter.
	 */
	struct mem_cgroup_stat_cpu __percpu *stat;
	/*
	 * used when a cpu is offlined or other synchronizations
	 * See mem_cgroup_read_stat().
	 */
	struct mem_cgroup_stat_cpu nocpu_base;
	spinlock_t pcp_counter_lock;

	atomic_t	dead_count;
#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
	struct tcp_memcontrol tcp_mem;
#endif
#if defined(CONFIG_MEMCG_KMEM)
	/* analogous to slab_common's slab_caches list. per-memcg */
	struct list_head memcg_slab_caches;
	/* Not a spinlock, we can take a lot of time walking the list */
	struct mutex slab_caches_mutex;
	/* Index in the kmem_cache->memcg_params->memcg_caches array */
	int kmemcg_id;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

static size_t memcg_size(void)
{
	return sizeof(struct mem_cgroup) +
		nr_node_ids * sizeof(struct mem_cgroup_per_node);
}

/* internal only representation about the status of kmem accounting. */
enum {
	KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
	KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */
	KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
};

/* We account when limit is on, but only after call sites are patched */
#define KMEM_ACCOUNTED_MASK \
		((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED))

#ifdef CONFIG_MEMCG_KMEM
static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
{
	set_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
{
	return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
}

static void memcg_kmem_set_activated(struct mem_cgroup *memcg)
{
	set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
}

static void memcg_kmem_clear_activated(struct mem_cgroup *memcg)
{
	clear_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
}

static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
{
	/*
	 * Our caller must use css_get() first, because memcg_uncharge_kmem()
	 * will call css_put() if it sees the memcg is dead.
	 */
	smp_wmb();
	if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
		set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
}

static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
{
	return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
				  &memcg->kmem_account_flags);
}
#endif

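/*
 * Illustrative sketch, not part of the original file: the intended pairing
 * between kmem charging and css reference counting, as described in the
 * memcg_kmem_mark_dead() comment above.  A charger takes a css reference
 * before charging; the matching uncharge drops it, and if the memcg was
 * marked dead in between, that final uncharge is what releases the
 * reference keeping the offlined memcg alive:
 *
 *	css_get(&memcg->css);
 *	res_counter_charge(&memcg->kmem, size, &fail_res);
 *	...
 *	res_counter_uncharge(&memcg->kmem, size);
 *	if (memcg_kmem_test_and_clear_dead(memcg))
 *		css_put(&memcg->css);
 */
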
/* Stuffs for move charges at task migration. */
/*
 * Types of charges to be moved. "move_charge_at_immigrate" and
 * "immigrate_flags" are treated as a left-shifted bitmap of these types.
 */
enum move_type {
	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
	NR_MOVE_TYPE,
};

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long immigrate_flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

static bool move_anon(void)
{
	return test_bit(MOVE_CHARGE_TYPE_ANON, &mc.immigrate_flags);
}

static bool move_file(void)
{
	return test_bit(MOVE_CHARGE_TYPE_FILE, &mc.immigrate_flags);
}

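/*
 * Example (illustrative, not part of the original file): the value written
 * to memory.move_charge_at_immigrate is interpreted as a bitmap of the
 * move_type bits above, so once the flags are copied into
 * mc.immigrate_flags:
 *
 *	0 - move nothing
 *	1 - MOVE_CHARGE_TYPE_ANON set: move_anon() returns true
 *	2 - MOVE_CHARGE_TYPE_FILE set: move_file() returns true
 *	3 - both bits set: anonymous and file charges are moved
 */
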
/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

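/*
 * Example (illustrative, not part of the original file): a control file's
 * private value packs the resource type into the upper 16 bits and the
 * res_counter member index into the lower 16 bits, e.g.
 *
 *	val = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);
 *	MEMFILE_TYPE(val)  == _MEMSWAP
 *	MEMFILE_ATTR(val)  == RES_LIMIT
 */
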
/*
 * Reclaim flags for mem_cgroup_hierarchical_reclaim
 */
#define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
#define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
#define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
#define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)

/*
 * The memcg_create_mutex will be held whenever a new cgroup is created.
 * As a consequence, any change that needs to protect against new child cgroups
 * appearing has to hold it as well.
 */
static DEFINE_MUTEX(memcg_create_mutex);

static void mem_cgroup_get(struct mem_cgroup *memcg);
static void mem_cgroup_put(struct mem_cgroup *memcg);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
	return container_of(s, struct mem_cgroup, css);
}

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css)
{
	return &mem_cgroup_from_css(css)->vmpressure;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

/* Writing them here to avoid exposing memcg's inner layout */
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)

void sock_update_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled) {
		struct mem_cgroup *memcg;
		struct cg_proto *cg_proto;

		BUG_ON(!sk->sk_prot->proto_cgroup);

		/* Socket cloning can throw us here with sk_cgrp already
		 * filled. It won't however, necessarily happen from
		 * process context. So the test for root memcg given
		 * the current task's memcg won't help us in this case.
		 *
		 * Respecting the original socket's memcg is a better
		 * decision in this case.
		 */
		if (sk->sk_cgrp) {
			BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
			css_get(&sk->sk_cgrp->memcg->css);
			return;
		}

		rcu_read_lock();
		memcg = mem_cgroup_from_task(current);
		cg_proto = sk->sk_prot->proto_cgroup(memcg);
		if (!mem_cgroup_is_root(memcg) &&
		    memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
			sk->sk_cgrp = cg_proto;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(sock_update_memcg);

void sock_release_memcg(struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct mem_cgroup *memcg;
		WARN_ON(!sk->sk_cgrp->memcg);
		memcg = sk->sk_cgrp->memcg;
		css_put(&sk->sk_cgrp->memcg->css);
	}
}

struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg || mem_cgroup_is_root(memcg))
		return NULL;

	return &memcg->tcp_mem.cg_proto;
}
EXPORT_SYMBOL(tcp_proto_cgroup);

static void disarm_sock_keys(struct mem_cgroup *memcg)
{
	if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
		return;
	static_key_slow_dec(&memcg_socket_limit_enabled);
}
#else
static void disarm_sock_keys(struct mem_cgroup *memcg)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params->memcg_caches.
 * There are two main reasons for not using the css_id for this:
 *  1) this works better in sparse environments, where we have a lot of memcgs,
 *     but only a few kmem-limited. Or also, if we have, for instance, 200
 *     memcgs, and none but the 200th is kmem-limited, we'd have to have a
 *     200 entry array for that.
 *
 *  2) In order not to violate the cgroup API, we would like to do all memory
 *     allocation in ->create(). At that point, we haven't yet allocated the
 *     css_id. Having a separate index prevents us from messing with the cgroup
 *     core for this.
 *
 * The current size of the caches array is stored in
 * memcg_limited_groups_array_size.  It will double each time we have to
 * increase it.
 */
static DEFINE_IDA(kmem_limited_groups);
int memcg_limited_groups_array_size;

/*
 * MIN_SIZE is different than 1, because we would like to avoid going through
 * the alloc/free process all the time. In a small machine, 4 kmem-limited
 * cgroups is a reasonable guess. In the future, it could be a parameter or
 * tunable, but that is strictly not necessary.
 *
 * MAX_SIZE should be as large as the number of css_ids. Ideally, we could get
 * this constant directly from cgroup, but it is understandable that this is
 * better kept as an internal representation in cgroup.c. In any case, the
 * css_id space is not getting any smaller, and we don't have to necessarily
 * increase ours as well if it increases.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE 65535

/*
 * A lot of the calls to the cache allocation functions are expected to be
 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well.
 */
struct static_key memcg_kmem_enabled_key;
EXPORT_SYMBOL(memcg_kmem_enabled_key);

static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
	if (memcg_kmem_is_active(memcg)) {
		static_key_slow_dec(&memcg_kmem_enabled_key);
		ida_simple_remove(&kmem_limited_groups, memcg->kmemcg_id);
	}
	/*
	 * This check can't live in kmem destruction function,
	 * since the charges will outlive the cgroup
	 */
	WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0);
}
#else
static void disarm_kmem_keys(struct mem_cgroup *memcg)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static void disarm_static_keys(struct mem_cgroup *memcg)
{
	disarm_sock_keys(memcg);
	disarm_kmem_keys(memcg);
}

static void drain_all_stock_async(struct mem_cgroup *memcg);

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *memcg, int nid, int zid)
{
	VM_BUG_ON((unsigned)nid >= nr_node_ids);
	return &memcg->nodeinfo[nid]->zoneinfo[zid];
}

struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg)
{
	return &memcg->css;
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(memcg, nid, zid);
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_node_zone(int nid, int zid)
{
	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static struct mem_cgroup_tree_per_zone *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
}

static void
__mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz,
				unsigned long long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_zone *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
						tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void
__mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void
mem_cgroup_remove_exceeded(struct mem_cgroup *memcg,
				struct mem_cgroup_per_zone *mz,
				struct mem_cgroup_tree_per_zone *mctz)
{
	spin_lock(&mctz->lock);
	__mem_cgroup_remove_exceeded(memcg, mz, mctz);
	spin_unlock(&mctz->lock);
}


static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long long excess;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);
	mctz = soft_limit_tree_from_page(page);

	/*
	 * Necessary to update all ancestors when hierarchy is used.
	 * because their event counter is not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_zoneinfo(memcg, nid, zid);
		excess = res_counter_soft_limit_excess(&memcg->res);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			spin_lock(&mctz->lock);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(memcg, mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(memcg, mz, mctz, excess);
			spin_unlock(&mctz->lock);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	int node, zone;
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup_tree_per_zone *mctz;

	for_each_node(node) {
		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			mz = mem_cgroup_zoneinfo(memcg, node, zone);
			mctz = soft_limit_tree_node_zone(node, zone);
			mem_cgroup_remove_exceeded(memcg, mz, mctz);
		}
	}
}

static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_zone *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* Nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
		!css_tryget(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_zone *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
	struct mem_cgroup_per_zone *mz;

	spin_lock(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock(&mctz->lock);
	return mz;
}

/*
 * Implementation Note: reading percpu statistics for memcg.
 *
 * Both vmstat[] and percpu_counter have thresholds and do periodic
 * synchronization to implement a "quick" read. There is a trade-off between
 * reading cost and precision of value. Then, we may have a chance to implement
 * a periodic synchronization of counters in memcg's counter.
 *
 * But this _read() function is used for user interface now. The user accounts
 * memory usage by memory cgroup and he _always_ requires exact value because
 * he accounts memory. Even if we provide quick-and-fuzzy read, we always
 * have to visit all online cpus and make sum. So, for now, unnecessary
 * synchronization is not implemented. (just implemented for cpu hotplug)
 *
 * If there are kernel internal actions which can make use of some not-exact
 * value, and reading all cpu values can be a performance bottleneck in some
 * common workload, thresholds and synchronization as in vmstat[] should be
 * implemented.
 */
static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.count[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	put_online_cpus();
	return val;
}

static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
					 bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_online_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
#ifdef CONFIG_HOTPLUG_CPU
	spin_lock(&memcg->pcp_counter_lock);
	val += memcg->nocpu_base.events[idx];
	spin_unlock(&memcg->pcp_counter_lock);
#endif
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool anon, int nr_pages)
{
	preempt_disable();

	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (anon)
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (PageTransHuge(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);

	preempt_enable();
}

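/*
 * Worked example (illustrative, not part of the original file): charging a
 * 2MB transparent huge page as anon calls the function above with
 * nr_pages = 512, so both MEM_CGROUP_STAT_RSS and MEM_CGROUP_STAT_RSS_HUGE
 * grow by 512 pages while only a single PGPGIN event is counted; the
 * matching uncharge passes nr_pages = -512 and counts one PGPGOUT event.
 */
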
unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_zone *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	return mz->lru_size[lru];
}

static unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
			unsigned int lru_mask)
{
	struct mem_cgroup_per_zone *mz;
	enum lru_list lru;
	unsigned long ret = 0;

	mz = mem_cgroup_zoneinfo(memcg, nid, zid);

	for_each_lru(lru) {
		if (BIT(lru) & lru_mask)
			ret += mz->lru_size[lru];
	}
	return ret;
}

static unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			int nid, unsigned int lru_mask)
{
	u64 total = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		total += mem_cgroup_zone_nr_lru_pages(memcg,
						nid, zid, lru_mask);

	return total;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
			unsigned int lru_mask)
{
	int nid;
	u64 total = 0;

	for_each_node_state(nid, N_MEMORY)
		total += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return total;
}

static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

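/*
 * Worked example (illustrative, not part of the original file): with
 * THRESHOLDS_EVENTS_TARGET == 128, if nr_page_events has reached 130 and
 * targets[MEM_CGROUP_TARGET_THRESH] was 128, then (long)128 - (long)130 < 0,
 * so the target is bumped to 130 + 128 = 258 and the function returns true;
 * the threshold check will not fire again until roughly 128 more page
 * events have been accounted.  The signed subtraction keeps the comparison
 * correct across counter wraparound, like time_after() in jiffies.h.
 */
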
/*
 * Check events in order.
 *
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	preempt_disable();
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		preempt_enable();

		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	} else
		preempt_enable();
}

struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return mem_cgroup_from_css(
		cgroup_subsys_state(cont, mem_cgroup_subsys_id));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_subsys_state(p, mem_cgroup_subsys_id));
}

struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	if (!mm)
		return NULL;
	/*
	 * Because we have no locks, mm->owner's may be being moved to other
	 * cgroup. We use css_tryget() here even if this looks
	 * pessimistic (rather than adding locks here).
	 */
	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			break;
	} while (!css_tryget(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

MH
1101/*
1102 * Returns a next (in a pre-order walk) alive memcg (with elevated css
1103 * ref. count) or NULL if the whole root's subtree has been visited.
1104 *
1105 * helper function to be used by mem_cgroup_iter
1106 */
1107static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
1108 struct mem_cgroup *last_visited)
1109{
1110 struct cgroup *prev_cgroup, *next_cgroup;
1111
1112 /*
1113 * Root is not visited by cgroup iterators so it needs an
1114 * explicit visit.
1115 */
1116 if (!last_visited)
1117 return root;
1118
1119 prev_cgroup = (last_visited == root) ? NULL
1120 : last_visited->css.cgroup;
1121skip_node:
1122 next_cgroup = cgroup_next_descendant_pre(
1123 prev_cgroup, root->css.cgroup);
1124
1125 /*
1126 * Even if we found a group we have to make sure it is
1127 * alive. css && !memcg means that the groups should be
1128 * skipped and we should continue the tree walk.
1129 * last_visited css is safe to use because it is
1130 * protected by css_get and the tree walk is rcu safe.
1131 */
1132 if (next_cgroup) {
1133 struct mem_cgroup *mem = mem_cgroup_from_cont(
1134 next_cgroup);
1135 if (css_tryget(&mem->css))
1136 return mem;
1137 else {
1138 prev_cgroup = next_cgroup;
1139 goto skip_node;
1140 }
1141 }
1142
1143 return NULL;
1144}
1145
519ebea3
JW
1146static void mem_cgroup_iter_invalidate(struct mem_cgroup *root)
1147{
1148 /*
1149 * When a group in the hierarchy below root is destroyed, the
1150 * hierarchy iterator can no longer be trusted since it might
1151 * have pointed to the destroyed group. Invalidate it.
1152 */
1153 atomic_inc(&root->dead_count);
1154}
1155
1156static struct mem_cgroup *
1157mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
1158 struct mem_cgroup *root,
1159 int *sequence)
1160{
1161 struct mem_cgroup *position = NULL;
1162 /*
1163 * A cgroup destruction happens in two stages: offlining and
1164 * release. They are separated by a RCU grace period.
1165 *
1166 * If the iterator is valid, we may still race with an
1167 * offlining. The RCU lock ensures the object won't be
1168 * released, tryget will fail if we lost the race.
1169 */
1170 *sequence = atomic_read(&root->dead_count);
1171 if (iter->last_dead_count == *sequence) {
1172 smp_rmb();
1173 position = iter->last_visited;
1174 if (position && !css_tryget(&position->css))
1175 position = NULL;
1176 }
1177 return position;
1178}
1179
1180static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter,
1181 struct mem_cgroup *last_visited,
1182 struct mem_cgroup *new_position,
1183 int sequence)
1184{
1185 if (last_visited)
1186 css_put(&last_visited->css);
1187 /*
1188 * We store the sequence count from the time @last_visited was
1189 * loaded successfully instead of rereading it here so that we
1190 * don't lose destruction events in between. We could have
1191 * raced with the destruction of @new_position after all.
1192 */
1193 iter->last_visited = new_position;
1194 smp_wmb();
1195 iter->last_dead_count = sequence;
1196}
1197
/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a zone and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same zone and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *last_visited = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		last_visited = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out_css_put;
		return root;
	}

	rcu_read_lock();
	while (!memcg) {
		struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
		int uninitialized_var(seq);

		if (reclaim) {
			int nid = zone_to_nid(reclaim->zone);
			int zid = zone_idx(reclaim->zone);
			struct mem_cgroup_per_zone *mz;

			mz = mem_cgroup_zoneinfo(root, nid, zid);
			iter = &mz->reclaim_iter[reclaim->priority];
			if (prev && reclaim->generation != iter->generation) {
				iter->last_visited = NULL;
				goto out_unlock;
			}

			last_visited = mem_cgroup_iter_load(iter, root, &seq);
		}

		memcg = __mem_cgroup_iter_next(root, last_visited);

		if (reclaim) {
			mem_cgroup_iter_update(iter, last_visited, memcg, seq);

			if (!memcg)
				iter->generation++;
			else if (!prev && memcg)
				reclaim->generation = iter->generation;
		}

		if (prev && !memcg)
			goto out_unlock;
	}
out_unlock:
	rcu_read_unlock();
out_css_put:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

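/*
 * Illustrative usage sketch (not part of the original file): a walk that
 * stops early must hand the last returned memcg back via
 * mem_cgroup_iter_break() so the pending css reference is dropped:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 *
 * some_condition() stands in for whatever test the caller applies.
 */
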
void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (unlikely(!memcg))
		goto out;

	switch (idx) {
	case PGFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
		break;
	case PGMAJFAULT:
		this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
		break;
	default:
		BUG();
	}
out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(__mem_cgroup_count_vm_event);

/**
 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
 * @zone: zone of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for the given @zone and
 * @mem. This can be the global zone lruvec, if the memory controller
 * is disabled.
 */
struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
				      struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_zone *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	mz = mem_cgroup_zoneinfo(memcg, zone_to_nid(zone), zone_idx(zone));
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/*
 * Following LRU functions are allowed to be used without PCG_LOCK.
 * Operations are called by routine of global LRU independently from memcg.
 * What we have to take care of here is validness of pc->mem_cgroup.
 *
 * Changes to pc->mem_cgroup happens when
 * 1. charge
 * 2. moving account
 * In typical case, "charge" is done before add-to-lru. Exception is SwapCache.
 * It is added to LRU before charge.
 * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
 * When moving account, the page is not on LRU. It's isolated.
 */

/**
 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
 * @page: the page
 * @zone: zone of the page
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
{
	struct mem_cgroup_per_zone *mz;
	struct mem_cgroup *memcg;
	struct page_cgroup *pc;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &zone->lruvec;
		goto out;
	}

	pc = lookup_page_cgroup(page);
	memcg = pc->mem_cgroup;

	/*
	 * Surreptitiously switch any uncharged offlist page to root:
	 * an uncharged page off lru does nothing to secure
	 * its former mem_cgroup from sudden removal.
	 *
	 * Our caller holds lru_lock, and PageCgroupUsed is updated
	 * under page_cgroup lock: between them, they make all uses
	 * of pc->mem_cgroup safe.
	 */
	if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
		pc->mem_cgroup = memcg = root_mem_cgroup;

	mz = page_cgroup_zoneinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->zone here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->zone != zone))
		lruvec->zone = zone;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called when a page is added to or removed from an
 * lru list.
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages)
{
	struct mem_cgroup_per_zone *mz;
	unsigned long *lru_size;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
	lru_size = mz->lru_size + lru;
	*lru_size += nr_pages;
	VM_BUG_ON((long)(*lru_size) < 0);
}

/*
 * Checks whether given mem is same or in the root_mem_cgroup's
 * hierarchy subtree
 */
bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg)
{
	if (root_memcg == memcg)
		return true;
	if (!root_memcg->use_hierarchy || !memcg)
		return false;
	return css_is_ancestor(&memcg->css, &root_memcg->css);
}

static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				       struct mem_cgroup *memcg)
{
	bool ret;

	rcu_read_lock();
	ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
	rcu_read_unlock();
	return ret;
}

bool task_in_mem_cgroup(struct task_struct *task,
			const struct mem_cgroup *memcg)
{
	struct mem_cgroup *curr = NULL;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		curr = try_get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the oom
		 * killer still needs to detect if they have already been oom
		 * killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		curr = mem_cgroup_from_task(task);
		if (curr)
			css_get(&curr->css);
		rcu_read_unlock();
	}
	if (!curr)
		return false;
	/*
	 * We should check use_hierarchy of "memcg" not "curr". Because checking
	 * use_hierarchy of "curr" here makes this function return true if
	 * hierarchy is enabled in "curr" and "curr" is a child of "memcg" in
	 * the *cgroup* hierarchy (even if use_hierarchy is disabled in "memcg").
	 */
	ret = mem_cgroup_same_or_subtree(memcg, curr);
	css_put(&curr->css);
	return ret;
}

int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	unsigned long inactive_ratio;
	unsigned long inactive;
	unsigned long active;
	unsigned long gb;

	inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
	active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);

	gb = (inactive + active) >> (30 - PAGE_SHIFT);
	if (gb)
		inactive_ratio = int_sqrt(10 * gb);
	else
		inactive_ratio = 1;

	return inactive * inactive_ratio < active;
}

1528
6d61ef40
BS
1529#define mem_cgroup_from_res_counter(counter, member) \
1530 container_of(counter, struct mem_cgroup, member)
1531
19942822 1532/**
9d11ea9f 1533 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
dad7557e 1534 * @memcg: the memory cgroup
19942822 1535 *
9d11ea9f 1536 * Returns the maximum amount of memory @mem can be charged with, in
7ec99d62 1537 * pages.
19942822 1538 */
c0ff4b85 1539static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
19942822 1540{
9d11ea9f
JW
1541 unsigned long long margin;
1542
c0ff4b85 1543 margin = res_counter_margin(&memcg->res);
9d11ea9f 1544 if (do_swap_account)
c0ff4b85 1545 margin = min(margin, res_counter_margin(&memcg->memsw));
7ec99d62 1546 return margin >> PAGE_SHIFT;
19942822
JW
1547}
1548
1f4c025b 1549int mem_cgroup_swappiness(struct mem_cgroup *memcg)
a7885eb8
KM
1550{
1551 struct cgroup *cgrp = memcg->css.cgroup;
a7885eb8
KM
1552
1553 /* root ? */
1554 if (cgrp->parent == NULL)
1555 return vm_swappiness;
1556
bf1ff263 1557 return memcg->swappiness;
a7885eb8
KM
1558}
1559
/*
 * memcg->moving_account is used for checking possibility that some thread is
 * calling move_account(). When a thread on CPU-A starts moving pages under
 * a memcg, other threads should check memcg->moving_account under
 * rcu_read_lock(), like this:
 *
 *	CPU-A				CPU-B
 *					rcu_read_lock()
 *	memcg->moving_account+1		if (memcg->moving_account)
 *						take heavy locks.
 *	synchronize_rcu()		update something.
 *					rcu_read_unlock()
 *	start move here.
 */

/* for quick checking without looking up memcg */
atomic_t memcg_moving __read_mostly;

static void mem_cgroup_start_move(struct mem_cgroup *memcg)
{
	atomic_inc(&memcg_moving);
	atomic_inc(&memcg->moving_account);
	synchronize_rcu();
}

static void mem_cgroup_end_move(struct mem_cgroup *memcg)
{
	/*
	 * Now, mem_cgroup_clear_mc() may call this function with NULL.
	 * We check NULL in callee rather than caller.
	 */
	if (memcg) {
		atomic_dec(&memcg_moving);
		atomic_dec(&memcg->moving_account);
	}
}

/*
 * 2 routines for checking "mem" is under move_account() or not.
 *
 * mem_cgroup_stolen() -  checking whether a cgroup is mc.from or not. This
 *			  is used for avoiding races in accounting. If true,
 *			  pc->mem_cgroup may be overwritten.
 *
 * mem_cgroup_under_move() - checking a cgroup is mc.from or mc.to or
 *			  under hierarchy of moving cgroups. This is for
 *			  waiting at high memory pressure caused by "move".
 */

static bool mem_cgroup_stolen(struct mem_cgroup *memcg)
{
	VM_BUG_ON(!rcu_read_lock_held());
	return atomic_read(&memcg->moving_account) > 0;
}

static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike task_move routines, we access mc.to, mc.from not under
	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_same_or_subtree(memcg, from)
		|| mem_cgroup_same_or_subtree(memcg, to);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

/*
 * Take this lock when
 * - a code tries to modify page's memcg while it's USED.
 * - a code tries to modify page state accounting in a memcg.
 * see mem_cgroup_stolen(), too.
 */
static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
				  unsigned long *flags)
{
	spin_lock_irqsave(&memcg->move_lock, *flags);
}

static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
				unsigned long *flags)
{
	spin_unlock_irqrestore(&memcg->move_lock, *flags);
}

58cf188e 1671#define K(x) ((x) << (PAGE_SHIFT-10))
e222432b 1672/**
58cf188e 1673 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
e222432b
BS
1674 * @memcg: The memory cgroup that went over limit
1675 * @p: Task that is going to be killed
1676 *
1677 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1678 * enabled
1679 */
1680void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1681{
1682 struct cgroup *task_cgrp;
1683 struct cgroup *mem_cgrp;
1684 /*
1685 * Need a buffer in BSS, can't rely on allocations. The code relies
1686 * on the assumption that OOM is serialized for memory controller.
1687 * If this assumption is broken, revisit this code.
1688 */
1689 static char memcg_name[PATH_MAX];
1690 int ret;
58cf188e
SZ
1691 struct mem_cgroup *iter;
1692 unsigned int i;
e222432b 1693
58cf188e 1694 if (!p)
e222432b
BS
1695 return;
1696
e222432b
BS
1697 rcu_read_lock();
1698
1699 mem_cgrp = memcg->css.cgroup;
1700 task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1701
1702 ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1703 if (ret < 0) {
1704 /*
1705 * Unfortunately, we are unable to convert to a useful name
1706 * But we'll still print out the usage information
1707 */
1708 rcu_read_unlock();
1709 goto done;
1710 }
1711 rcu_read_unlock();
1712
d045197f 1713 pr_info("Task in %s killed", memcg_name);
e222432b
BS
1714
1715 rcu_read_lock();
1716 ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1717 if (ret < 0) {
1718 rcu_read_unlock();
1719 goto done;
1720 }
1721 rcu_read_unlock();
1722
1723 /*
1724 * Continues from above, so we don't need a KERN_ level
1725 */
d045197f 1726 pr_cont(" as a result of limit of %s\n", memcg_name);
e222432b
BS
1727done:
1728
d045197f 1729 pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
e222432b
BS
1730 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1731 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1732 res_counter_read_u64(&memcg->res, RES_FAILCNT));
d045197f 1733 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n",
e222432b
BS
1734 res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1735 res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1736 res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
d045197f 1737 pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n",
510fc4e1
GC
1738 res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10,
1739 res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10,
1740 res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
58cf188e
SZ
1741
1742 for_each_mem_cgroup_tree(iter, memcg) {
1743 pr_info("Memory cgroup stats");
1744
1745 rcu_read_lock();
1746 ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
1747 if (!ret)
1748 pr_cont(" for %s", memcg_name);
1749 rcu_read_unlock();
1750 pr_cont(":");
1751
1752 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1753 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1754 continue;
1755 pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
1756 K(mem_cgroup_read_stat(iter, i)));
1757 }
1758
1759 for (i = 0; i < NR_LRU_LISTS; i++)
1760 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1761 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1762
1763 pr_cont("\n");
1764 }
e222432b
BS
1765}
1766
81d39c20
KH
1767/*
1768 * This function returns the number of memcgs under the hierarchy tree. Returns
1769 * 1 (self count) if there are no children.
1770 */
c0ff4b85 1771static int mem_cgroup_count_children(struct mem_cgroup *memcg)
81d39c20
KH
1772{
1773 int num = 0;
7d74b06f
KH
1774 struct mem_cgroup *iter;
1775
c0ff4b85 1776 for_each_mem_cgroup_tree(iter, memcg)
7d74b06f 1777 num++;
81d39c20
KH
1778 return num;
1779}
1780
a63d83f4
DR
1781/*
1782 * Return the memory (and swap, if configured) limit for a memcg.
1783 */
9cbb78bb 1784static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
a63d83f4
DR
1785{
1786 u64 limit;
a63d83f4 1787
f3e8eb70 1788 limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
f3e8eb70 1789
a63d83f4 1790 /*
9a5a8f19 1791 * Do not consider swap space if we cannot swap due to swappiness
a63d83f4 1792 */
9a5a8f19
MH
1793 if (mem_cgroup_swappiness(memcg)) {
1794 u64 memsw;
1795
1796 limit += total_swap_pages << PAGE_SHIFT;
1797 memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1798
1799 /*
1800 * If memsw is finite and limits the amount of swap space
1801 * available to this memcg, return that limit.
1802 */
1803 limit = min(limit, memsw);
1804 }
1805
1806 return limit;
a63d83f4
DR
1807}
1808
19965460
DR
1809static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1810 int order)
9cbb78bb
DR
1811{
1812 struct mem_cgroup *iter;
1813 unsigned long chosen_points = 0;
1814 unsigned long totalpages;
1815 unsigned int points = 0;
1816 struct task_struct *chosen = NULL;
1817
876aafbf 1818 /*
465adcf1
DR
1819 * If current has a pending SIGKILL or is exiting, then automatically
1820 * select it. The goal is to allow it to allocate so that it may
1821 * quickly exit and free its memory.
876aafbf 1822 */
465adcf1 1823 if (fatal_signal_pending(current) || current->flags & PF_EXITING) {
876aafbf
DR
1824 set_thread_flag(TIF_MEMDIE);
1825 return;
1826 }
1827
1828 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
9cbb78bb
DR
1829 totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
1830 for_each_mem_cgroup_tree(iter, memcg) {
1831 struct cgroup *cgroup = iter->css.cgroup;
1832 struct cgroup_iter it;
1833 struct task_struct *task;
1834
1835 cgroup_iter_start(cgroup, &it);
1836 while ((task = cgroup_iter_next(cgroup, &it))) {
1837 switch (oom_scan_process_thread(task, totalpages, NULL,
1838 false)) {
1839 case OOM_SCAN_SELECT:
1840 if (chosen)
1841 put_task_struct(chosen);
1842 chosen = task;
1843 chosen_points = ULONG_MAX;
1844 get_task_struct(chosen);
1845 /* fall through */
1846 case OOM_SCAN_CONTINUE:
1847 continue;
1848 case OOM_SCAN_ABORT:
1849 cgroup_iter_end(cgroup, &it);
1850 mem_cgroup_iter_break(memcg, iter);
1851 if (chosen)
1852 put_task_struct(chosen);
1853 return;
1854 case OOM_SCAN_OK:
1855 break;
1856 };
1857 points = oom_badness(task, memcg, NULL, totalpages);
1858 if (points > chosen_points) {
1859 if (chosen)
1860 put_task_struct(chosen);
1861 chosen = task;
1862 chosen_points = points;
1863 get_task_struct(chosen);
1864 }
1865 }
1866 cgroup_iter_end(cgroup, &it);
1867 }
1868
1869 if (!chosen)
1870 return;
1871 points = chosen_points * 1000 / totalpages;
9cbb78bb
DR
1872 oom_kill_process(chosen, gfp_mask, order, points, totalpages, memcg,
1873 NULL, "Memory cgroup out of memory");
9cbb78bb
DR
1874}
1875
5660048c
JW
1876static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1877 gfp_t gfp_mask,
1878 unsigned long flags)
1879{
1880 unsigned long total = 0;
1881 bool noswap = false;
1882 int loop;
1883
1884 if (flags & MEM_CGROUP_RECLAIM_NOSWAP)
1885 noswap = true;
1886 if (!(flags & MEM_CGROUP_RECLAIM_SHRINK) && memcg->memsw_is_minimum)
1887 noswap = true;
1888
1889 for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) {
1890 if (loop)
1891 drain_all_stock_async(memcg);
1892 total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap);
1893 /*
1894 * Allow limit shrinkers, which are triggered directly
1895 * by userspace, to catch signals and stop reclaim
1896 * after minimal progress, regardless of the margin.
1897 */
1898 if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK))
1899 break;
1900 if (mem_cgroup_margin(memcg))
1901 break;
1902 /*
1903 * If nothing was reclaimed after two attempts, there
1904 * may be no reclaimable pages in this hierarchy.
1905 */
1906 if (loop && !total)
1907 break;
1908 }
1909 return total;
1910}
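/*
 * Illustrative sketch, not part of the original file: how a limit shrinker is
 * expected to call mem_cgroup_reclaim().  The caller name is hypothetical.
 */
#if 0
static void example_shrink_usage(struct mem_cgroup *memcg)
{
	/* triggered directly by userspace, so stop after minimal progress */
	mem_cgroup_reclaim(memcg, GFP_KERNEL,
			   MEM_CGROUP_RECLAIM_NOSWAP | MEM_CGROUP_RECLAIM_SHRINK);
}
#endif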
1911
4d0c066d
KH
1912/**
1913 * test_mem_cgroup_node_reclaimable
dad7557e 1914 * @memcg: the target memcg
4d0c066d
KH
1915 * @nid: the node ID to be checked.
1916 * @noswap : specify true here if the user wants file-only information.
1917 *
1918 * This function returns whether the specified memcg contains any
1919 * reclaimable pages on a node. Returns true if there are any reclaimable
1920 * pages in the node.
1921 */
c0ff4b85 1922static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
4d0c066d
KH
1923 int nid, bool noswap)
1924{
c0ff4b85 1925 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
4d0c066d
KH
1926 return true;
1927 if (noswap || !total_swap_pages)
1928 return false;
c0ff4b85 1929 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
4d0c066d
KH
1930 return true;
1931 return false;
1932
1933}
889976db
YH
1934#if MAX_NUMNODES > 1
1935
1936/*
1937 * Always updating the nodemask is not very good - even if we have an empty
1938 * list or the wrong list here, we can start from some node and traverse all
1939 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1940 *
1941 */
c0ff4b85 1942static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
889976db
YH
1943{
1944 int nid;
453a9bf3
KH
1945 /*
1946 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1947 * pagein/pageout changes since the last update.
1948 */
c0ff4b85 1949 if (!atomic_read(&memcg->numainfo_events))
453a9bf3 1950 return;
c0ff4b85 1951 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
889976db
YH
1952 return;
1953
889976db 1954 /* make a nodemask where this memcg uses memory from */
31aaea4a 1955 memcg->scan_nodes = node_states[N_MEMORY];
889976db 1956
31aaea4a 1957 for_each_node_mask(nid, node_states[N_MEMORY]) {
889976db 1958
c0ff4b85
R
1959 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1960 node_clear(nid, memcg->scan_nodes);
889976db 1961 }
453a9bf3 1962
c0ff4b85
R
1963 atomic_set(&memcg->numainfo_events, 0);
1964 atomic_set(&memcg->numainfo_updating, 0);
889976db
YH
1965}
1966
1967/*
1968 * Selecting a node where we start reclaim from. Because what we need is just
1969 * reducing the usage counter, starting from anywhere is OK. Considering
1970 * memory reclaim from current node, there are pros. and cons.
1971 *
1972 * Freeing memory from current node means freeing memory from a node which
1973 * we'll use or we've used. So, it may make LRU bad. And if several threads
1974 * hit limits, it will see a contention on a node. But freeing from remote
1975 * node means more costs for memory reclaim because of memory latency.
1976 *
1977 * Now, we use round-robin. Better algorithm is welcomed.
1978 */
c0ff4b85 1979int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
889976db
YH
1980{
1981 int node;
1982
c0ff4b85
R
1983 mem_cgroup_may_update_nodemask(memcg);
1984 node = memcg->last_scanned_node;
889976db 1985
c0ff4b85 1986 node = next_node(node, memcg->scan_nodes);
889976db 1987 if (node == MAX_NUMNODES)
c0ff4b85 1988 node = first_node(memcg->scan_nodes);
889976db
YH
1989 /*
1990 * We call this when we hit limit, not when pages are added to LRU.
1991 * No LRU may hold pages because all pages are UNEVICTABLE or
1992 * memcg is too small and all pages are not on LRU. In that case,
1993 * we use the current node.
1994 */
1995 if (unlikely(node == MAX_NUMNODES))
1996 node = numa_node_id();
1997
c0ff4b85 1998 memcg->last_scanned_node = node;
889976db
YH
1999 return node;
2000}
2001
4d0c066d
KH
2002/*
2003 * Check all nodes whether they contain reclaimable pages or not.
2004 * For quick scan, we make use of scan_nodes. This will allow us to skip
2005 * unused nodes. But scan_nodes is lazily updated and may not contain
2006 * enough new information. We need to double check.
2007 */
6bbda35c 2008static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
4d0c066d
KH
2009{
2010 int nid;
2011
2012 /*
2013 * quick check...making use of scan_node.
2014 * We can skip unused nodes.
2015 */
c0ff4b85
R
2016 if (!nodes_empty(memcg->scan_nodes)) {
2017 for (nid = first_node(memcg->scan_nodes);
4d0c066d 2018 nid < MAX_NUMNODES;
c0ff4b85 2019 nid = next_node(nid, memcg->scan_nodes)) {
4d0c066d 2020
c0ff4b85 2021 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
4d0c066d
KH
2022 return true;
2023 }
2024 }
2025 /*
2026 * Check rest of nodes.
2027 */
31aaea4a 2028 for_each_node_state(nid, N_MEMORY) {
c0ff4b85 2029 if (node_isset(nid, memcg->scan_nodes))
4d0c066d 2030 continue;
c0ff4b85 2031 if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap))
4d0c066d
KH
2032 return true;
2033 }
2034 return false;
2035}
2036
889976db 2037#else
c0ff4b85 2038int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
889976db
YH
2039{
2040 return 0;
2041}
4d0c066d 2042
6bbda35c 2043static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
4d0c066d 2044{
c0ff4b85 2045 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
4d0c066d 2046}
889976db
YH
2047#endif
2048
5660048c
JW
2049static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
2050 struct zone *zone,
2051 gfp_t gfp_mask,
2052 unsigned long *total_scanned)
6d61ef40 2053{
9f3a0d09 2054 struct mem_cgroup *victim = NULL;
5660048c 2055 int total = 0;
04046e1a 2056 int loop = 0;
9d11ea9f 2057 unsigned long excess;
185efc0f 2058 unsigned long nr_scanned;
527a5ec9
JW
2059 struct mem_cgroup_reclaim_cookie reclaim = {
2060 .zone = zone,
2061 .priority = 0,
2062 };
9d11ea9f 2063
c0ff4b85 2064 excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT;
04046e1a 2065
4e416953 2066 while (1) {
527a5ec9 2067 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
9f3a0d09 2068 if (!victim) {
04046e1a 2069 loop++;
4e416953
BS
2070 if (loop >= 2) {
2071 /*
2072 * If we have not been able to reclaim
2073 * anything, it might be because there are
2074 * no reclaimable pages under this hierarchy
2075 */
5660048c 2076 if (!total)
4e416953 2077 break;
4e416953 2078 /*
25985edc 2079 * We want to do more targeted reclaim.
4e416953
BS
2080 * excess >> 2 is not too excessive, so we neither
2081 * reclaim too much nor too little, which would keep us
2082 * coming back to reclaim from this cgroup
2083 */
2084 if (total >= (excess >> 2) ||
9f3a0d09 2085 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
4e416953 2086 break;
4e416953 2087 }
9f3a0d09 2088 continue;
4e416953 2089 }
5660048c 2090 if (!mem_cgroup_reclaimable(victim, false))
6d61ef40 2091 continue;
5660048c
JW
2092 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
2093 zone, &nr_scanned);
2094 *total_scanned += nr_scanned;
2095 if (!res_counter_soft_limit_excess(&root_memcg->res))
9f3a0d09 2096 break;
6d61ef40 2097 }
9f3a0d09 2098 mem_cgroup_iter_break(root_memcg, victim);
04046e1a 2099 return total;
6d61ef40
BS
2100}
2101
867578cb
KH
2102/*
2103 * Check OOM-Killer is already running under our hierarchy.
2104 * If someone is running, return false.
1af8efe9 2105 * Has to be called with memcg_oom_lock
867578cb 2106 */
c0ff4b85 2107static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
867578cb 2108{
79dfdacc 2109 struct mem_cgroup *iter, *failed = NULL;
a636b327 2110
9f3a0d09 2111 for_each_mem_cgroup_tree(iter, memcg) {
23751be0 2112 if (iter->oom_lock) {
79dfdacc
MH
2113 /*
2114 * this subtree of our hierarchy is already locked
2115 * so we cannot give a lock.
2116 */
79dfdacc 2117 failed = iter;
9f3a0d09
JW
2118 mem_cgroup_iter_break(memcg, iter);
2119 break;
23751be0
JW
2120 } else
2121 iter->oom_lock = true;
7d74b06f 2122 }
867578cb 2123
79dfdacc 2124 if (!failed)
23751be0 2125 return true;
79dfdacc
MH
2126
2127 /*
2128 * OK, we failed to lock the whole subtree so we have to clean up
2129 * what we set up to the failing subtree
2130 */
9f3a0d09 2131 for_each_mem_cgroup_tree(iter, memcg) {
79dfdacc 2132 if (iter == failed) {
9f3a0d09
JW
2133 mem_cgroup_iter_break(memcg, iter);
2134 break;
79dfdacc
MH
2135 }
2136 iter->oom_lock = false;
2137 }
23751be0 2138 return false;
a636b327 2139}
0b7f569e 2140
79dfdacc 2141/*
1af8efe9 2142 * Has to be called with memcg_oom_lock
79dfdacc 2143 */
c0ff4b85 2144static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
0b7f569e 2145{
7d74b06f
KH
2146 struct mem_cgroup *iter;
2147
c0ff4b85 2148 for_each_mem_cgroup_tree(iter, memcg)
79dfdacc
MH
2149 iter->oom_lock = false;
2150 return 0;
2151}
2152
c0ff4b85 2153static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
79dfdacc
MH
2154{
2155 struct mem_cgroup *iter;
2156
c0ff4b85 2157 for_each_mem_cgroup_tree(iter, memcg)
79dfdacc
MH
2158 atomic_inc(&iter->under_oom);
2159}
2160
c0ff4b85 2161static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
79dfdacc
MH
2162{
2163 struct mem_cgroup *iter;
2164
867578cb
KH
2165 /*
2166 * When a new child is created while the hierarchy is under oom,
2167 * mem_cgroup_oom_lock() may not be called. We have to use
2168 * atomic_add_unless() here.
2169 */
c0ff4b85 2170 for_each_mem_cgroup_tree(iter, memcg)
79dfdacc 2171 atomic_add_unless(&iter->under_oom, -1, 0);
0b7f569e
KH
2172}
2173
1af8efe9 2174static DEFINE_SPINLOCK(memcg_oom_lock);
867578cb
KH
2175static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
2176
dc98df5a 2177struct oom_wait_info {
d79154bb 2178 struct mem_cgroup *memcg;
dc98df5a
KH
2179 wait_queue_t wait;
2180};
2181
2182static int memcg_oom_wake_function(wait_queue_t *wait,
2183 unsigned mode, int sync, void *arg)
2184{
d79154bb
HD
2185 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
2186 struct mem_cgroup *oom_wait_memcg;
dc98df5a
KH
2187 struct oom_wait_info *oom_wait_info;
2188
2189 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
d79154bb 2190 oom_wait_memcg = oom_wait_info->memcg;
dc98df5a 2191
dc98df5a 2192 /*
d79154bb 2193 * Both of oom_wait_info->memcg and wake_memcg are stable under us.
dc98df5a
KH
2194 * Then we can use css_is_ancestor without taking care of RCU.
2195 */
c0ff4b85
R
2196 if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg)
2197 && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg))
dc98df5a 2198 return 0;
dc98df5a
KH
2199 return autoremove_wake_function(wait, mode, sync, arg);
2200}
2201
c0ff4b85 2202static void memcg_wakeup_oom(struct mem_cgroup *memcg)
dc98df5a 2203{
c0ff4b85
R
2204 /* for filtering, pass "memcg" as argument. */
2205 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
dc98df5a
KH
2206}
2207
c0ff4b85 2208static void memcg_oom_recover(struct mem_cgroup *memcg)
3c11ecf4 2209{
c0ff4b85
R
2210 if (memcg && atomic_read(&memcg->under_oom))
2211 memcg_wakeup_oom(memcg);
3c11ecf4
KH
2212}
2213
867578cb
KH
2214/*
2215 * try to call OOM killer. returns false if we should exit memory-reclaim loop.
2216 */
6bbda35c
KS
2217static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
2218 int order)
0b7f569e 2219{
dc98df5a 2220 struct oom_wait_info owait;
3c11ecf4 2221 bool locked, need_to_kill;
867578cb 2222
d79154bb 2223 owait.memcg = memcg;
dc98df5a
KH
2224 owait.wait.flags = 0;
2225 owait.wait.func = memcg_oom_wake_function;
2226 owait.wait.private = current;
2227 INIT_LIST_HEAD(&owait.wait.task_list);
3c11ecf4 2228 need_to_kill = true;
c0ff4b85 2229 mem_cgroup_mark_under_oom(memcg);
79dfdacc 2230
c0ff4b85 2231 /* At first, try to OOM lock hierarchy under memcg.*/
1af8efe9 2232 spin_lock(&memcg_oom_lock);
c0ff4b85 2233 locked = mem_cgroup_oom_lock(memcg);
867578cb
KH
2234 /*
2235 * Even if signal_pending(), we can't quit charge() loop without
2236 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
2237 * under OOM is always welcomed, use TASK_KILLABLE here.
2238 */
3c11ecf4 2239 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
c0ff4b85 2240 if (!locked || memcg->oom_kill_disable)
3c11ecf4
KH
2241 need_to_kill = false;
2242 if (locked)
c0ff4b85 2243 mem_cgroup_oom_notify(memcg);
1af8efe9 2244 spin_unlock(&memcg_oom_lock);
867578cb 2245
3c11ecf4
KH
2246 if (need_to_kill) {
2247 finish_wait(&memcg_oom_waitq, &owait.wait);
e845e199 2248 mem_cgroup_out_of_memory(memcg, mask, order);
3c11ecf4 2249 } else {
867578cb 2250 schedule();
dc98df5a 2251 finish_wait(&memcg_oom_waitq, &owait.wait);
867578cb 2252 }
1af8efe9 2253 spin_lock(&memcg_oom_lock);
79dfdacc 2254 if (locked)
c0ff4b85
R
2255 mem_cgroup_oom_unlock(memcg);
2256 memcg_wakeup_oom(memcg);
1af8efe9 2257 spin_unlock(&memcg_oom_lock);
867578cb 2258
c0ff4b85 2259 mem_cgroup_unmark_under_oom(memcg);
79dfdacc 2260
867578cb
KH
2261 if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
2262 return false;
2263 /* Give chance to dying process */
715a5ee8 2264 schedule_timeout_uninterruptible(1);
867578cb 2265 return true;
0b7f569e
KH
2266}
2267
d69b042f
BS
2268/*
2269 * Currently used to update mapped file statistics, but the routine can be
2270 * generalized to update other statistics as well.
32047e2a
KH
2271 *
2272 * Notes: Race condition
2273 *
2274 * We usually use page_cgroup_lock() for accessing page_cgroup members, but
2275 * it tends to be costly. Under some conditions, however, we don't need
2276 * to do so _always_.
2277 *
2278 * Considering "charge", lock_page_cgroup() is not required because all
2279 * file-stat operations happen after a page is attached to radix-tree. There
2280 * are no races with "charge".
2281 *
2282 * Considering "uncharge", we know that memcg doesn't clear pc->mem_cgroup
2283 * at "uncharge" intentionally. So, we always see valid pc->mem_cgroup even
2284 * if there is a race with "uncharge". Statistics themselves are properly handled
2285 * by flags.
2286 *
2287 * Considering "move", this is an only case we see a race. To make the race
619d094b
KH
2288 * window small, we check memcg->moving_account and detect the possibility of a race.
2289 * If there is one, we take a lock.
d69b042f 2290 */
26174efd 2291
89c06bd5
KH
2292void __mem_cgroup_begin_update_page_stat(struct page *page,
2293 bool *locked, unsigned long *flags)
2294{
2295 struct mem_cgroup *memcg;
2296 struct page_cgroup *pc;
2297
2298 pc = lookup_page_cgroup(page);
2299again:
2300 memcg = pc->mem_cgroup;
2301 if (unlikely(!memcg || !PageCgroupUsed(pc)))
2302 return;
2303 /*
2304 * If this memory cgroup is not under account moving, we don't
da92c47d 2305 * need to take move_lock_mem_cgroup(). Because we already hold
89c06bd5 2306 * rcu_read_lock(), any calls to move_account will be delayed until
13fd1dd9 2307 * rcu_read_unlock() if mem_cgroup_stolen() == true.
89c06bd5 2308 */
13fd1dd9 2309 if (!mem_cgroup_stolen(memcg))
89c06bd5
KH
2310 return;
2311
2312 move_lock_mem_cgroup(memcg, flags);
2313 if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
2314 move_unlock_mem_cgroup(memcg, flags);
2315 goto again;
2316 }
2317 *locked = true;
2318}
2319
2320void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
2321{
2322 struct page_cgroup *pc = lookup_page_cgroup(page);
2323
2324 /*
2325 * It's guaranteed that pc->mem_cgroup never changes while
2326 * lock is held because a routine modifies pc->mem_cgroup
da92c47d 2327 * should take move_lock_mem_cgroup().
89c06bd5
KH
2328 */
2329 move_unlock_mem_cgroup(pc->mem_cgroup, flags);
2330}
2331
2a7106f2
GT
2332void mem_cgroup_update_page_stat(struct page *page,
2333 enum mem_cgroup_page_stat_item idx, int val)
d69b042f 2334{
c0ff4b85 2335 struct mem_cgroup *memcg;
32047e2a 2336 struct page_cgroup *pc = lookup_page_cgroup(page);
dbd4ea78 2337 unsigned long uninitialized_var(flags);
d69b042f 2338
cfa44946 2339 if (mem_cgroup_disabled())
d69b042f 2340 return;
89c06bd5 2341
c0ff4b85
R
2342 memcg = pc->mem_cgroup;
2343 if (unlikely(!memcg || !PageCgroupUsed(pc)))
89c06bd5 2344 return;
26174efd 2345
26174efd 2346 switch (idx) {
2a7106f2 2347 case MEMCG_NR_FILE_MAPPED:
2a7106f2 2348 idx = MEM_CGROUP_STAT_FILE_MAPPED;
26174efd
KH
2349 break;
2350 default:
2351 BUG();
8725d541 2352 }
d69b042f 2353
c0ff4b85 2354 this_cpu_add(memcg->stat->count[idx], val);
d69b042f 2355}
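/*
 * Illustrative sketch, not part of the original file: the locking pattern a
 * statistics updater is expected to follow around the three helpers above.
 * The caller name is hypothetical.
 */
#if 0
static void example_account_file_mapped(struct page *page)
{
	bool locked = false;
	unsigned long flags;

	rcu_read_lock();
	__mem_cgroup_begin_update_page_stat(page, &locked, &flags);
	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, 1);
	if (locked)
		__mem_cgroup_end_update_page_stat(page, &flags);
	rcu_read_unlock();
}
#endif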
26174efd 2356
cdec2e42
KH
2357/*
2358 * size of first charge trial. "32" comes from vmscan.c's magic value.
2359 * TODO: it may be necessary to use bigger numbers on big iron.
2360 */
7ec99d62 2361#define CHARGE_BATCH 32U
cdec2e42
KH
2362struct memcg_stock_pcp {
2363 struct mem_cgroup *cached; /* this never be root cgroup */
11c9ea4e 2364 unsigned int nr_pages;
cdec2e42 2365 struct work_struct work;
26fe6168 2366 unsigned long flags;
a0db00fc 2367#define FLUSHING_CACHED_CHARGE 0
cdec2e42
KH
2368};
2369static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
9f50fad6 2370static DEFINE_MUTEX(percpu_charge_mutex);
cdec2e42 2371
a0956d54
SS
2372/**
2373 * consume_stock: Try to consume stocked charge on this cpu.
2374 * @memcg: memcg to consume from.
2375 * @nr_pages: how many pages to charge.
2376 *
2377 * The charges will only happen if @memcg matches the current cpu's memcg
2378 * stock, and at least @nr_pages are available in that stock. Failure to
2379 * service an allocation will refill the stock.
2380 *
2381 * returns true if successful, false otherwise.
cdec2e42 2382 */
a0956d54 2383static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42
KH
2384{
2385 struct memcg_stock_pcp *stock;
2386 bool ret = true;
2387
a0956d54
SS
2388 if (nr_pages > CHARGE_BATCH)
2389 return false;
2390
cdec2e42 2391 stock = &get_cpu_var(memcg_stock);
a0956d54
SS
2392 if (memcg == stock->cached && stock->nr_pages >= nr_pages)
2393 stock->nr_pages -= nr_pages;
cdec2e42
KH
2394 else /* need to call res_counter_charge */
2395 ret = false;
2396 put_cpu_var(memcg_stock);
2397 return ret;
2398}
2399
2400/*
2401 * Returns stocks cached in percpu to res_counter and resets the cached information.
2402 */
2403static void drain_stock(struct memcg_stock_pcp *stock)
2404{
2405 struct mem_cgroup *old = stock->cached;
2406
11c9ea4e
JW
2407 if (stock->nr_pages) {
2408 unsigned long bytes = stock->nr_pages * PAGE_SIZE;
2409
2410 res_counter_uncharge(&old->res, bytes);
cdec2e42 2411 if (do_swap_account)
11c9ea4e
JW
2412 res_counter_uncharge(&old->memsw, bytes);
2413 stock->nr_pages = 0;
cdec2e42
KH
2414 }
2415 stock->cached = NULL;
cdec2e42
KH
2416}
2417
2418/*
2419 * This must be called under preempt disabled or must be called by
2420 * a thread which is pinned to local cpu.
2421 */
2422static void drain_local_stock(struct work_struct *dummy)
2423{
2424 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
2425 drain_stock(stock);
26fe6168 2426 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
cdec2e42
KH
2427}
2428
e4777496
MH
2429static void __init memcg_stock_init(void)
2430{
2431 int cpu;
2432
2433 for_each_possible_cpu(cpu) {
2434 struct memcg_stock_pcp *stock =
2435 &per_cpu(memcg_stock, cpu);
2436 INIT_WORK(&stock->work, drain_local_stock);
2437 }
2438}
2439
cdec2e42
KH
2440/*
2441 * Cache charges(val), which came from res_counter, in the local per_cpu area.
320cc51d 2442 * This will be consumed by consume_stock() function, later.
cdec2e42 2443 */
c0ff4b85 2444static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
cdec2e42
KH
2445{
2446 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
2447
c0ff4b85 2448 if (stock->cached != memcg) { /* reset if necessary */
cdec2e42 2449 drain_stock(stock);
c0ff4b85 2450 stock->cached = memcg;
cdec2e42 2451 }
11c9ea4e 2452 stock->nr_pages += nr_pages;
cdec2e42
KH
2453 put_cpu_var(memcg_stock);
2454}
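/*
 * Illustrative sketch, not part of the original file: the fast/slow path split
 * that the charging code builds on top of consume_stock() and refill_stock().
 * memsw accounting, reclaim and OOM handling are omitted; the caller name is
 * hypothetical.
 */
#if 0
static int example_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct res_counter *fail_res;

	if (consume_stock(memcg, nr_pages))	/* per-cpu cache hit, no locks */
		return 0;
	/* miss: charge a whole batch and park the surplus in the stock */
	if (res_counter_charge(&memcg->res, CHARGE_BATCH * PAGE_SIZE, &fail_res))
		return -ENOMEM;
	refill_stock(memcg, CHARGE_BATCH - nr_pages);
	return 0;
}
#endif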
2455
2456/*
c0ff4b85 2457 * Drains all per-CPU charge caches for given root_memcg resp. subtree
d38144b7
MH
2458 * of the hierarchy under it. sync flag says whether we should block
2459 * until the work is done.
cdec2e42 2460 */
c0ff4b85 2461static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
cdec2e42 2462{
26fe6168 2463 int cpu, curcpu;
d38144b7 2464
cdec2e42 2465 /* Notify other cpus that system-wide "drain" is running */
cdec2e42 2466 get_online_cpus();
5af12d0e 2467 curcpu = get_cpu();
cdec2e42
KH
2468 for_each_online_cpu(cpu) {
2469 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
c0ff4b85 2470 struct mem_cgroup *memcg;
26fe6168 2471
c0ff4b85
R
2472 memcg = stock->cached;
2473 if (!memcg || !stock->nr_pages)
26fe6168 2474 continue;
c0ff4b85 2475 if (!mem_cgroup_same_or_subtree(root_memcg, memcg))
3e92041d 2476 continue;
d1a05b69
MH
2477 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2478 if (cpu == curcpu)
2479 drain_local_stock(&stock->work);
2480 else
2481 schedule_work_on(cpu, &stock->work);
2482 }
cdec2e42 2483 }
5af12d0e 2484 put_cpu();
d38144b7
MH
2485
2486 if (!sync)
2487 goto out;
2488
2489 for_each_online_cpu(cpu) {
2490 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
9f50fad6 2491 if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
d38144b7
MH
2492 flush_work(&stock->work);
2493 }
2494out:
cdec2e42 2495 put_online_cpus();
d38144b7
MH
2496}
2497
2498/*
2499 * Tries to drain stocked charges in other cpus. This function is asynchronous
2500 * and just puts a work item per cpu to drain locally on each cpu. The caller can
2501 * expect some charges will be returned to res_counter later but cannot wait for
2502 * it.
2503 */
c0ff4b85 2504static void drain_all_stock_async(struct mem_cgroup *root_memcg)
d38144b7 2505{
9f50fad6
MH
2506 /*
2507 * If someone calls draining, avoid adding more kworker runs.
2508 */
2509 if (!mutex_trylock(&percpu_charge_mutex))
2510 return;
c0ff4b85 2511 drain_all_stock(root_memcg, false);
9f50fad6 2512 mutex_unlock(&percpu_charge_mutex);
cdec2e42
KH
2513}
2514
2515/* This is a synchronous drain interface. */
c0ff4b85 2516static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
cdec2e42
KH
2517{
2518 /* called when force_empty is called */
9f50fad6 2519 mutex_lock(&percpu_charge_mutex);
c0ff4b85 2520 drain_all_stock(root_memcg, true);
9f50fad6 2521 mutex_unlock(&percpu_charge_mutex);
cdec2e42
KH
2522}
2523
711d3d2c
KH
2524/*
2525 * This function drains the percpu counter value from a DEAD cpu and
2526 * moves it to the local cpu. Note that this function can be preempted.
2527 */
c0ff4b85 2528static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
711d3d2c
KH
2529{
2530 int i;
2531
c0ff4b85 2532 spin_lock(&memcg->pcp_counter_lock);
6104621d 2533 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
c0ff4b85 2534 long x = per_cpu(memcg->stat->count[i], cpu);
711d3d2c 2535
c0ff4b85
R
2536 per_cpu(memcg->stat->count[i], cpu) = 0;
2537 memcg->nocpu_base.count[i] += x;
711d3d2c 2538 }
e9f8974f 2539 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
c0ff4b85 2540 unsigned long x = per_cpu(memcg->stat->events[i], cpu);
e9f8974f 2541
c0ff4b85
R
2542 per_cpu(memcg->stat->events[i], cpu) = 0;
2543 memcg->nocpu_base.events[i] += x;
e9f8974f 2544 }
c0ff4b85 2545 spin_unlock(&memcg->pcp_counter_lock);
711d3d2c
KH
2546}
2547
2548static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
cdec2e42
KH
2549 unsigned long action,
2550 void *hcpu)
2551{
2552 int cpu = (unsigned long)hcpu;
2553 struct memcg_stock_pcp *stock;
711d3d2c 2554 struct mem_cgroup *iter;
cdec2e42 2555
619d094b 2556 if (action == CPU_ONLINE)
1489ebad 2557 return NOTIFY_OK;
1489ebad 2558
d833049b 2559 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
cdec2e42 2560 return NOTIFY_OK;
711d3d2c 2561
9f3a0d09 2562 for_each_mem_cgroup(iter)
711d3d2c
KH
2563 mem_cgroup_drain_pcp_counter(iter, cpu);
2564
cdec2e42
KH
2565 stock = &per_cpu(memcg_stock, cpu);
2566 drain_stock(stock);
2567 return NOTIFY_OK;
2568}
2569
4b534334
KH
2570
2571/* See __mem_cgroup_try_charge() for details */
2572enum {
2573 CHARGE_OK, /* success */
2574 CHARGE_RETRY, /* need to retry but retry is not bad */
2575 CHARGE_NOMEM, /* we can't do more. return -ENOMEM */
2576 CHARGE_WOULDBLOCK, /* GFP_WAIT wasn't set and no enough res. */
2577 CHARGE_OOM_DIE, /* the current is killed because of OOM */
2578};
2579
c0ff4b85 2580static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
4c9c5359
SS
2581 unsigned int nr_pages, unsigned int min_pages,
2582 bool oom_check)
4b534334 2583{
7ec99d62 2584 unsigned long csize = nr_pages * PAGE_SIZE;
4b534334
KH
2585 struct mem_cgroup *mem_over_limit;
2586 struct res_counter *fail_res;
2587 unsigned long flags = 0;
2588 int ret;
2589
c0ff4b85 2590 ret = res_counter_charge(&memcg->res, csize, &fail_res);
4b534334
KH
2591
2592 if (likely(!ret)) {
2593 if (!do_swap_account)
2594 return CHARGE_OK;
c0ff4b85 2595 ret = res_counter_charge(&memcg->memsw, csize, &fail_res);
4b534334
KH
2596 if (likely(!ret))
2597 return CHARGE_OK;
2598
c0ff4b85 2599 res_counter_uncharge(&memcg->res, csize);
4b534334
KH
2600 mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
2601 flags |= MEM_CGROUP_RECLAIM_NOSWAP;
2602 } else
2603 mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
9221edb7 2604 /*
9221edb7
JW
2605 * Never reclaim on behalf of optional batching, retry with a
2606 * single page instead.
2607 */
4c9c5359 2608 if (nr_pages > min_pages)
4b534334
KH
2609 return CHARGE_RETRY;
2610
2611 if (!(gfp_mask & __GFP_WAIT))
2612 return CHARGE_WOULDBLOCK;
2613
4c9c5359
SS
2614 if (gfp_mask & __GFP_NORETRY)
2615 return CHARGE_NOMEM;
2616
5660048c 2617 ret = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags);
7ec99d62 2618 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
19942822 2619 return CHARGE_RETRY;
4b534334 2620 /*
19942822
JW
2621 * Even though the limit is exceeded at this point, reclaim
2622 * may have been able to free some pages. Retry the charge
2623 * before killing the task.
2624 *
2625 * Only for regular pages, though: huge pages are rather
2626 * unlikely to succeed so close to the limit, and we fall back
2627 * to regular pages anyway in case of failure.
4b534334 2628 */
4c9c5359 2629 if (nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER) && ret)
4b534334
KH
2630 return CHARGE_RETRY;
2631
2632 /*
2633 * At task move, charge accounts can be doubly counted. So, it's
2634 * better to wait until the end of task_move if something is going on.
2635 */
2636 if (mem_cgroup_wait_acct_move(mem_over_limit))
2637 return CHARGE_RETRY;
2638
2639	/* If we don't need to call the oom-killer at all, return immediately */
2640 if (!oom_check)
2641 return CHARGE_NOMEM;
2642 /* check OOM */
e845e199 2643 if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
4b534334
KH
2644 return CHARGE_OOM_DIE;
2645
2646 return CHARGE_RETRY;
2647}
2648
f817ed48 2649/*
38c5d72f
KH
2650 * __mem_cgroup_try_charge() does
2651 * 1. detect memcg to be charged against from passed *mm and *ptr,
2652 * 2. update res_counter
2653 * 3. call memory reclaim if necessary.
2654 *
2655 * In some special cases, if the task is dying (fatal_signal_pending() or
2656 * TIF_MEMDIE set), this function returns -EINTR while writing root_mem_cgroup
2657 * to *ptr. There are two reasons for this. 1: fatal threads should quit as soon
2658 * as possible without any hazards. 2: all pages should have a valid
2659 * pc->mem_cgroup. If mm is NULL and the caller doesn't pass a valid memcg
2660 * pointer, that is treated as a charge to root_mem_cgroup.
2661 *
2662 * So __mem_cgroup_try_charge() will return
2663 * 0 ... on success, filling *ptr with a valid memcg pointer.
2664 * -ENOMEM ... charge failure because of resource limits.
2665 * -EINTR ... if thread is fatal. *ptr is filled with root_mem_cgroup.
2666 *
2667 * Unlike the exported interface, an "oom" parameter is added. if oom==true,
2668 * the oom-killer can be invoked.
8a9f3ccd 2669 */
f817ed48 2670static int __mem_cgroup_try_charge(struct mm_struct *mm,
ec168510 2671 gfp_t gfp_mask,
7ec99d62 2672 unsigned int nr_pages,
c0ff4b85 2673 struct mem_cgroup **ptr,
7ec99d62 2674 bool oom)
8a9f3ccd 2675{
7ec99d62 2676 unsigned int batch = max(CHARGE_BATCH, nr_pages);
4b534334 2677 int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
c0ff4b85 2678 struct mem_cgroup *memcg = NULL;
4b534334 2679 int ret;
a636b327 2680
867578cb
KH
2681 /*
2682 * Unlike the global VM's OOM-kill, we're not in a system-level memory
2683 * shortage. So, allow a dying process to go ahead in addition to a
2684 * MEMDIE process.
2685 */
2686 if (unlikely(test_thread_flag(TIF_MEMDIE)
2687 || fatal_signal_pending(current)))
2688 goto bypass;
a636b327 2689
8a9f3ccd 2690 /*
3be91277
HD
2691 * We always charge the cgroup the mm_struct belongs to.
2692 * The mm_struct's mem_cgroup changes on task migration if the
8a9f3ccd 2693 * thread group leader migrates. It's possible that mm is not
24467cac 2694 * set, if so charge the root memcg (happens for pagecache usage).
8a9f3ccd 2695 */
c0ff4b85 2696 if (!*ptr && !mm)
38c5d72f 2697 *ptr = root_mem_cgroup;
f75ca962 2698again:
c0ff4b85
R
2699 if (*ptr) { /* css should be a valid one */
2700 memcg = *ptr;
c0ff4b85 2701 if (mem_cgroup_is_root(memcg))
f75ca962 2702 goto done;
a0956d54 2703 if (consume_stock(memcg, nr_pages))
f75ca962 2704 goto done;
c0ff4b85 2705 css_get(&memcg->css);
4b534334 2706 } else {
f75ca962 2707 struct task_struct *p;
54595fe2 2708
f75ca962
KH
2709 rcu_read_lock();
2710 p = rcu_dereference(mm->owner);
f75ca962 2711 /*
ebb76ce1 2712 * Because we don't have task_lock(), "p" can exit.
c0ff4b85 2713 * In that case, "memcg" can point to root or p can be NULL with
ebb76ce1
KH
2714 * race with swapoff. Then, we have a small risk of mis-accounting.
2715 * But such kind of mis-account by race always happens because
2716 * we don't have cgroup_mutex(). It's overkill and we allow that
2717 * small race, here.
2718 * (*) swapoff et al. will charge against the mm-struct, not against
2719 * task-struct. So, mm->owner can be NULL.
f75ca962 2720 */
c0ff4b85 2721 memcg = mem_cgroup_from_task(p);
38c5d72f
KH
2722 if (!memcg)
2723 memcg = root_mem_cgroup;
2724 if (mem_cgroup_is_root(memcg)) {
f75ca962
KH
2725 rcu_read_unlock();
2726 goto done;
2727 }
a0956d54 2728 if (consume_stock(memcg, nr_pages)) {
f75ca962
KH
2729 /*
2730 * It seems dangerous to access memcg without css_get().
2731 * But considering how consume_stock works, it's not
2732 * necessary. If consume_stock succeeds, some charges
2733 * from this memcg are cached on this cpu. So, we
2734 * don't need to call css_get()/css_tryget() before
2735 * calling consume_stock().
2736 */
2737 rcu_read_unlock();
2738 goto done;
2739 }
2740 /* after here, we may be blocked. we need to get refcnt */
c0ff4b85 2741 if (!css_tryget(&memcg->css)) {
f75ca962
KH
2742 rcu_read_unlock();
2743 goto again;
2744 }
2745 rcu_read_unlock();
2746 }
8a9f3ccd 2747
4b534334
KH
2748 do {
2749 bool oom_check;
7a81b88c 2750
4b534334 2751 /* If killed, bypass charge */
f75ca962 2752 if (fatal_signal_pending(current)) {
c0ff4b85 2753 css_put(&memcg->css);
4b534334 2754 goto bypass;
f75ca962 2755 }
6d61ef40 2756
4b534334
KH
2757 oom_check = false;
2758 if (oom && !nr_oom_retries) {
2759 oom_check = true;
2760 nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
cdec2e42 2761 }
66e1707b 2762
4c9c5359
SS
2763 ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages,
2764 oom_check);
4b534334
KH
2765 switch (ret) {
2766 case CHARGE_OK:
2767 break;
2768 case CHARGE_RETRY: /* not in OOM situation but retry */
7ec99d62 2769 batch = nr_pages;
c0ff4b85
R
2770 css_put(&memcg->css);
2771 memcg = NULL;
f75ca962 2772 goto again;
4b534334 2773 case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
c0ff4b85 2774 css_put(&memcg->css);
4b534334
KH
2775 goto nomem;
2776 case CHARGE_NOMEM: /* OOM routine works */
f75ca962 2777 if (!oom) {
c0ff4b85 2778 css_put(&memcg->css);
867578cb 2779 goto nomem;
f75ca962 2780 }
4b534334
KH
2781 /* If oom, we never return -ENOMEM */
2782 nr_oom_retries--;
2783 break;
2784 case CHARGE_OOM_DIE: /* Killed by OOM Killer */
c0ff4b85 2785 css_put(&memcg->css);
867578cb 2786 goto bypass;
66e1707b 2787 }
4b534334
KH
2788 } while (ret != CHARGE_OK);
2789
7ec99d62 2790 if (batch > nr_pages)
c0ff4b85
R
2791 refill_stock(memcg, batch - nr_pages);
2792 css_put(&memcg->css);
0c3e73e8 2793done:
c0ff4b85 2794 *ptr = memcg;
7a81b88c
KH
2795 return 0;
2796nomem:
c0ff4b85 2797 *ptr = NULL;
7a81b88c 2798 return -ENOMEM;
867578cb 2799bypass:
38c5d72f
KH
2800 *ptr = root_mem_cgroup;
2801 return -EINTR;
7a81b88c 2802}
8a9f3ccd 2803
a3032a2c
DN
2804/*
2805 * Sometimes we have to undo a charge we got by try_charge().
2806 * This function is for that: it does the uncharge and puts the css refcnt
2807 * gotten by try_charge().
2808 */
c0ff4b85 2809static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
e7018b8d 2810 unsigned int nr_pages)
a3032a2c 2811{
c0ff4b85 2812 if (!mem_cgroup_is_root(memcg)) {
e7018b8d
JW
2813 unsigned long bytes = nr_pages * PAGE_SIZE;
2814
c0ff4b85 2815 res_counter_uncharge(&memcg->res, bytes);
a3032a2c 2816 if (do_swap_account)
c0ff4b85 2817 res_counter_uncharge(&memcg->memsw, bytes);
a3032a2c 2818 }
854ffa8d
DN
2819}
2820
d01dd17f
KH
2821/*
2822 * Cancel charges in this cgroup... doesn't propagate to the parent cgroup.
2823 * This is useful when moving usage to parent cgroup.
2824 */
2825static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2826 unsigned int nr_pages)
2827{
2828 unsigned long bytes = nr_pages * PAGE_SIZE;
2829
2830 if (mem_cgroup_is_root(memcg))
2831 return;
2832
2833 res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2834 if (do_swap_account)
2835 res_counter_uncharge_until(&memcg->memsw,
2836 memcg->memsw.parent, bytes);
2837}
2838
a3b2d692
KH
2839/*
2840 * A helper function to get mem_cgroup from ID. must be called under
e9316080
TH
2841 * rcu_read_lock(). The caller is responsible for calling css_tryget if
2842 * the mem_cgroup is used for charging. (dropping refcnt from swap can be
2843 * called against removed memcg.)
a3b2d692
KH
2844 */
2845static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
2846{
2847 struct cgroup_subsys_state *css;
2848
2849 /* ID 0 is unused ID */
2850 if (!id)
2851 return NULL;
2852 css = css_lookup(&mem_cgroup_subsys, id);
2853 if (!css)
2854 return NULL;
b2145145 2855 return mem_cgroup_from_css(css);
a3b2d692
KH
2856}
2857
e42d9d5d 2858struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
b5a84319 2859{
c0ff4b85 2860 struct mem_cgroup *memcg = NULL;
3c776e64 2861 struct page_cgroup *pc;
a3b2d692 2862 unsigned short id;
b5a84319
KH
2863 swp_entry_t ent;
2864
3c776e64
DN
2865 VM_BUG_ON(!PageLocked(page));
2866
3c776e64 2867 pc = lookup_page_cgroup(page);
c0bd3f63 2868 lock_page_cgroup(pc);
a3b2d692 2869 if (PageCgroupUsed(pc)) {
c0ff4b85
R
2870 memcg = pc->mem_cgroup;
2871 if (memcg && !css_tryget(&memcg->css))
2872 memcg = NULL;
e42d9d5d 2873 } else if (PageSwapCache(page)) {
3c776e64 2874 ent.val = page_private(page);
9fb4b7cc 2875 id = lookup_swap_cgroup_id(ent);
a3b2d692 2876 rcu_read_lock();
c0ff4b85
R
2877 memcg = mem_cgroup_lookup(id);
2878 if (memcg && !css_tryget(&memcg->css))
2879 memcg = NULL;
a3b2d692 2880 rcu_read_unlock();
3c776e64 2881 }
c0bd3f63 2882 unlock_page_cgroup(pc);
c0ff4b85 2883 return memcg;
b5a84319
KH
2884}
2885
c0ff4b85 2886static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
5564e88b 2887 struct page *page,
7ec99d62 2888 unsigned int nr_pages,
9ce70c02
HD
2889 enum charge_type ctype,
2890 bool lrucare)
7a81b88c 2891{
ce587e65 2892 struct page_cgroup *pc = lookup_page_cgroup(page);
9ce70c02 2893 struct zone *uninitialized_var(zone);
fa9add64 2894 struct lruvec *lruvec;
9ce70c02 2895 bool was_on_lru = false;
b2402857 2896 bool anon;
9ce70c02 2897
ca3e0214 2898 lock_page_cgroup(pc);
90deb788 2899 VM_BUG_ON(PageCgroupUsed(pc));
ca3e0214
KH
2900 /*
2901 * we don't need page_cgroup_lock for tail pages, because they are not
2902 * accessed by any other context at this point.
2903 */
9ce70c02
HD
2904
2905 /*
2906 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2907 * may already be on some other mem_cgroup's LRU. Take care of it.
2908 */
2909 if (lrucare) {
2910 zone = page_zone(page);
2911 spin_lock_irq(&zone->lru_lock);
2912 if (PageLRU(page)) {
fa9add64 2913 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
9ce70c02 2914 ClearPageLRU(page);
fa9add64 2915 del_page_from_lru_list(page, lruvec, page_lru(page));
9ce70c02
HD
2916 was_on_lru = true;
2917 }
2918 }
2919
c0ff4b85 2920 pc->mem_cgroup = memcg;
261fb61a
KH
2921 /*
2922 * We access a page_cgroup asynchronously without lock_page_cgroup().
2923 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
2924 * is accessed after testing USED bit. To make pc->mem_cgroup visible
2925 * before USED bit, we need memory barrier here.
2926 * See mem_cgroup_add_lru_list(), etc.
2927 */
08e552c6 2928 smp_wmb();
b2402857 2929 SetPageCgroupUsed(pc);
3be91277 2930
9ce70c02
HD
2931 if (lrucare) {
2932 if (was_on_lru) {
fa9add64 2933 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
9ce70c02
HD
2934 VM_BUG_ON(PageLRU(page));
2935 SetPageLRU(page);
fa9add64 2936 add_page_to_lru_list(page, lruvec, page_lru(page));
9ce70c02
HD
2937 }
2938 spin_unlock_irq(&zone->lru_lock);
2939 }
2940
41326c17 2941 if (ctype == MEM_CGROUP_CHARGE_TYPE_ANON)
b2402857
KH
2942 anon = true;
2943 else
2944 anon = false;
2945
b070e65c 2946 mem_cgroup_charge_statistics(memcg, page, anon, nr_pages);
52d4b9ac 2947 unlock_page_cgroup(pc);
9ce70c02 2948
430e4863
KH
2949 /*
2950 * "charge_statistics" updated event counter. Then, check it.
2951 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
2952 * if they exceed the softlimit.
2953 */
c0ff4b85 2954 memcg_check_events(memcg, page);
7a81b88c 2955}
66e1707b 2956
7cf27982
GC
2957static DEFINE_MUTEX(set_limit_mutex);
2958
7ae1e1d0
GC
2959#ifdef CONFIG_MEMCG_KMEM
2960static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg)
2961{
2962 return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg) &&
2963 (memcg->kmem_account_flags & KMEM_ACCOUNTED_MASK);
2964}
2965
1f458cbf
GC
2966/*
2967 * This is a bit cumbersome, but it is rarely used and avoids a backpointer
2968 * in the memcg_cache_params struct.
2969 */
2970static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
2971{
2972 struct kmem_cache *cachep;
2973
2974 VM_BUG_ON(p->is_root_cache);
2975 cachep = p->root_cache;
2976 return cachep->memcg_params->memcg_caches[memcg_cache_id(p->memcg)];
2977}
2978
749c5415
GC
2979#ifdef CONFIG_SLABINFO
2980static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
2981 struct seq_file *m)
2982{
2983 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
2984 struct memcg_cache_params *params;
2985
2986 if (!memcg_can_account_kmem(memcg))
2987 return -EIO;
2988
2989 print_slabinfo_header(m);
2990
2991 mutex_lock(&memcg->slab_caches_mutex);
2992 list_for_each_entry(params, &memcg->memcg_slab_caches, list)
2993 cache_show(memcg_params_to_cache(params), m);
2994 mutex_unlock(&memcg->slab_caches_mutex);
2995
2996 return 0;
2997}
2998#endif
2999
7ae1e1d0
GC
3000static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size)
3001{
3002 struct res_counter *fail_res;
3003 struct mem_cgroup *_memcg;
3004 int ret = 0;
3005 bool may_oom;
3006
3007 ret = res_counter_charge(&memcg->kmem, size, &fail_res);
3008 if (ret)
3009 return ret;
3010
3011 /*
3012 * Conditions under which we can wait for the oom_killer. Those are
3013 * the same conditions tested by the core page allocator
3014 */
3015 may_oom = (gfp & __GFP_FS) && !(gfp & __GFP_NORETRY);
3016
3017 _memcg = memcg;
3018 ret = __mem_cgroup_try_charge(NULL, gfp, size >> PAGE_SHIFT,
3019 &_memcg, may_oom);
3020
3021 if (ret == -EINTR) {
3022 /*
3023 * __mem_cgroup_try_charge() chose to bypass to root due to
3024 * OOM kill or fatal signal. Since our only options are to
3025 * either fail the allocation or charge it to this cgroup, do
3026 * it as a temporary condition. But we can't fail. From a
3027 * kmem/slab perspective, the cache has already been selected,
3028 * by mem_cgroup_kmem_get_cache(), so it is too late to change
3029 * our minds.
3030 *
3031 * This condition will only trigger if the task entered
3032 * memcg_charge_kmem in a sane state, but was OOM-killed during
3033 * __mem_cgroup_try_charge() above. Tasks that were already
3034 * dying when the allocation triggers should have been already
3035 * directed to the root cgroup in memcontrol.h
3036 */
3037 res_counter_charge_nofail(&memcg->res, size, &fail_res);
3038 if (do_swap_account)
3039 res_counter_charge_nofail(&memcg->memsw, size,
3040 &fail_res);
3041 ret = 0;
3042 } else if (ret)
3043 res_counter_uncharge(&memcg->kmem, size);
3044
3045 return ret;
3046}
3047
3048static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size)
3049{
7ae1e1d0
GC
3050 res_counter_uncharge(&memcg->res, size);
3051 if (do_swap_account)
3052 res_counter_uncharge(&memcg->memsw, size);
7de37682
GC
3053
3054 /* Not down to 0 */
3055 if (res_counter_uncharge(&memcg->kmem, size))
3056 return;
3057
10d5ebf4
LZ
3058 /*
3059 * Releases a reference taken in kmem_cgroup_css_offline in case
3060 * this last uncharge is racing with the offlining code or it is
3061 * outliving the memcg existence.
3062 *
3063 * The memory barrier imposed by test&clear is paired with the
3064 * explicit one in memcg_kmem_mark_dead().
3065 */
7de37682 3066 if (memcg_kmem_test_and_clear_dead(memcg))
10d5ebf4 3067 css_put(&memcg->css);
7ae1e1d0
GC
3068}
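/*
 * Illustrative sketch, not part of the original file: how a kmem allocation is
 * expected to pair the two helpers above.  The wrapper name is hypothetical.
 */
#if 0
static void *example_kmalloc_charged(struct mem_cgroup *memcg, size_t size,
				     gfp_t gfp)
{
	void *p;

	if (memcg_charge_kmem(memcg, gfp, size))
		return NULL;
	p = kmalloc(size, gfp);
	if (!p)
		memcg_uncharge_kmem(memcg, size);
	return p;
}
#endif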
3069
2633d7a0
GC
3070void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep)
3071{
3072 if (!memcg)
3073 return;
3074
3075 mutex_lock(&memcg->slab_caches_mutex);
3076 list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
3077 mutex_unlock(&memcg->slab_caches_mutex);
3078}
3079
3080/*
3081 * helper for accessing a memcg's index. It will be used as an index in the
3082 * child cache array in kmem_cache, and also to derive its name. This function
3083 * will return -1 when this is not a kmem-limited memcg.
3084 */
3085int memcg_cache_id(struct mem_cgroup *memcg)
3086{
3087 return memcg ? memcg->kmemcg_id : -1;
3088}
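/*
 * Illustrative sketch, not part of the original file: how the id returned by
 * memcg_cache_id() indexes a root cache's per-memcg array, mirroring
 * memcg_params_to_cache() above.  The helper name is hypothetical.
 */
#if 0
static struct kmem_cache *example_cache_for(struct kmem_cache *root,
					    struct mem_cgroup *memcg)
{
	int id = memcg_cache_id(memcg);

	if (id < 0)	/* not a kmem-limited memcg: fall back to the root cache */
		return root;
	return root->memcg_params->memcg_caches[id];
}
#endif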
3089
55007d84
GC
3090/*
3091 * This ends up being protected by the set_limit mutex, during normal
3092 * operation, because that is its main call site.
3093 *
3094 * But when we create a new cache, we can call this as well if its parent
3095 * is kmem-limited. That will have to hold set_limit_mutex as well.
3096 */
3097int memcg_update_cache_sizes(struct mem_cgroup *memcg)
3098{
3099 int num, ret;
3100
3101 num = ida_simple_get(&kmem_limited_groups,
3102 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
3103 if (num < 0)
3104 return num;
3105 /*
3106 * After this point, kmem_accounted (that we test atomically in
3107 * the beginning of this conditional), is no longer 0. This
3108 * guarantees only one process will set the following boolean
3109 * to true. We don't need test_and_set because we're protected
3110 * by the set_limit_mutex anyway.
3111 */
3112 memcg_kmem_set_activated(memcg);
3113
3114 ret = memcg_update_all_caches(num+1);
3115 if (ret) {
3116 ida_simple_remove(&kmem_limited_groups, num);
3117 memcg_kmem_clear_activated(memcg);
3118 return ret;
3119 }
3120
3121 memcg->kmemcg_id = num;
3122 INIT_LIST_HEAD(&memcg->memcg_slab_caches);
3123 mutex_init(&memcg->slab_caches_mutex);
3124 return 0;
3125}
3126
3127static size_t memcg_caches_array_size(int num_groups)
3128{
3129 ssize_t size;
3130 if (num_groups <= 0)
3131 return 0;
3132
3133 size = 2 * num_groups;
3134 if (size < MEMCG_CACHES_MIN_SIZE)
3135 size = MEMCG_CACHES_MIN_SIZE;
3136 else if (size > MEMCG_CACHES_MAX_SIZE)
3137 size = MEMCG_CACHES_MAX_SIZE;
3138
3139 return size;
3140}
3141
3142/*
3143 * We should update the current array size iff all cache updates succeed. This
3144 * can only be done from the slab side. The slab mutex needs to be held when
3145 * calling this.
3146 */
3147void memcg_update_array_size(int num)
3148{
3149 if (num > memcg_limited_groups_array_size)
3150 memcg_limited_groups_array_size = memcg_caches_array_size(num);
3151}
3152
15cf17d2
KK
3153static void kmem_cache_destroy_work_func(struct work_struct *w);
3154
55007d84
GC
3155int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
3156{
3157 struct memcg_cache_params *cur_params = s->memcg_params;
3158
3159 VM_BUG_ON(s->memcg_params && !s->memcg_params->is_root_cache);
3160
3161 if (num_groups > memcg_limited_groups_array_size) {
3162 int i;
3163 ssize_t size = memcg_caches_array_size(num_groups);
3164
3165 size *= sizeof(void *);
3166 size += sizeof(struct memcg_cache_params);
3167
3168 s->memcg_params = kzalloc(size, GFP_KERNEL);
3169 if (!s->memcg_params) {
3170 s->memcg_params = cur_params;
3171 return -ENOMEM;
3172 }
3173
3174 s->memcg_params->is_root_cache = true;
3175
3176 /*
3177 * There is the chance it will be bigger than
3178 * memcg_limited_groups_array_size, if we failed an allocation
3179 * in a cache, in which case all caches updated before it, will
3180 * have a bigger array.
3181 *
3182 * But if that is the case, the data after
3183 * memcg_limited_groups_array_size is certainly unused
3184 */
3185 for (i = 0; i < memcg_limited_groups_array_size; i++) {
3186 if (!cur_params->memcg_caches[i])
3187 continue;
3188 s->memcg_params->memcg_caches[i] =
3189 cur_params->memcg_caches[i];
3190 }
3191
3192 /*
3193 * Ideally, we would wait until all caches succeed, and only
3194 * then free the old one. But this is not worth the extra
3195 * pointer per-cache we'd have to have for this.
3196 *
3197 * It is not a big deal if some caches are left with a size
3198 * bigger than the others. And all updates will reset this
3199 * anyway.
3200 */
3201 kfree(cur_params);
3202 }
3203 return 0;
3204}
3205
943a451a
GC
3206int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
3207 struct kmem_cache *root_cache)
2633d7a0
GC
3208{
3209 size_t size = sizeof(struct memcg_cache_params);
3210
3211 if (!memcg_kmem_enabled())
3212 return 0;
3213
55007d84
GC
3214 if (!memcg)
3215 size += memcg_limited_groups_array_size * sizeof(void *);
3216
2633d7a0
GC
3217 s->memcg_params = kzalloc(size, GFP_KERNEL);
3218 if (!s->memcg_params)
3219 return -ENOMEM;
3220
15cf17d2
KK
3221 INIT_WORK(&s->memcg_params->destroy,
3222 kmem_cache_destroy_work_func);
943a451a 3223 if (memcg) {
2633d7a0 3224 s->memcg_params->memcg = memcg;
943a451a 3225 s->memcg_params->root_cache = root_cache;
4ba902b5
GC
3226 } else
3227 s->memcg_params->is_root_cache = true;
3228
2633d7a0
GC
3229 return 0;
3230}
3231
3232void memcg_release_cache(struct kmem_cache *s)
3233{
d7f25f8a
GC
3234 struct kmem_cache *root;
3235 struct mem_cgroup *memcg;
3236 int id;
3237
3238 /*
3239 * This happens, for instance, when a root cache goes away before we
3240 * add any memcg.
3241 */
3242 if (!s->memcg_params)
3243 return;
3244
3245 if (s->memcg_params->is_root_cache)
3246 goto out;
3247
3248 memcg = s->memcg_params->memcg;
3249 id = memcg_cache_id(memcg);
3250
3251 root = s->memcg_params->root_cache;
3252 root->memcg_params->memcg_caches[id] = NULL;
d7f25f8a
GC
3253
3254 mutex_lock(&memcg->slab_caches_mutex);
3255 list_del(&s->memcg_params->list);
3256 mutex_unlock(&memcg->slab_caches_mutex);
3257
20f05310 3258 css_put(&memcg->css);
d7f25f8a 3259out:
2633d7a0
GC
3260 kfree(s->memcg_params);
3261}
3262
0e9d92f2
GC
3263/*
3264 * During the creation of a new cache, we need to disable our accounting mechanism
3265 * altogether. This is true even if we are not creating, but rather just
3266 * enqueueing new caches to be created.
3267 *
3268 * This is because that process will trigger allocations; some visible, like
3269 * explicit kmallocs to auxiliary data structures, name strings and internal
3270 * cache structures; some well concealed, like INIT_WORK() that can allocate
3271 * objects during debug.
3272 *
3273 * If any allocation happens during memcg_kmem_get_cache, we will recurse back
3274 * to it. This may not be a bounded recursion: since the first cache creation
3275 * failed to complete (waiting on the allocation), we'll just try to create the
3276 * cache again, failing at the same point.
3277 *
3278 * memcg_kmem_get_cache is prepared to abort after seeing a positive count of
3279 * memcg_kmem_skip_account. So we enclose anything that might allocate memory
3280 * inside the following two functions.
3281 */
3282static inline void memcg_stop_kmem_account(void)
3283{
3284 VM_BUG_ON(!current->mm);
3285 current->memcg_kmem_skip_account++;
3286}
3287
3288static inline void memcg_resume_kmem_account(void)
3289{
3290 VM_BUG_ON(!current->mm);
3291 current->memcg_kmem_skip_account--;
3292}
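/*
 * Illustrative sketch, not part of the original file: the bracketing pattern
 * described in the comment above, around an internal allocation that must not
 * recurse into memcg accounting.  The function name is hypothetical.
 */
#if 0
static void *example_alloc_unaccounted(size_t size)
{
	void *p;

	memcg_stop_kmem_account();	/* auxiliary allocation: skip accounting */
	p = kmalloc(size, GFP_KERNEL);
	memcg_resume_kmem_account();
	return p;
}
#endif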
3293
1f458cbf
GC
3294static void kmem_cache_destroy_work_func(struct work_struct *w)
3295{
3296 struct kmem_cache *cachep;
3297 struct memcg_cache_params *p;
3298
3299 p = container_of(w, struct memcg_cache_params, destroy);
3300
3301 cachep = memcg_params_to_cache(p);
3302
22933152
GC
3303 /*
3304 * If we get down to 0 after shrink, we could delete right away.
3305 * However, memcg_release_pages() already puts us back in the workqueue
3306 * in that case. If we proceed deleting, we'll get a dangling
3307 * reference, and removing the object from the workqueue in that case
3308 * is unnecessary complication. We are not a fast path.
3309 *
3310 * Note that this case is fundamentally different from racing with
3311 * shrink_slab(): if memcg_cgroup_destroy_cache() is called in
3312 * kmem_cache_shrink, not only we would be reinserting a dead cache
3313 * into the queue, but doing so from inside the worker racing to
3314 * destroy it.
3315 *
3316 * So if we aren't down to zero, we'll just schedule a worker and try
3317 * again
3318 */
3319 if (atomic_read(&cachep->memcg_params->nr_pages) != 0) {
3320 kmem_cache_shrink(cachep);
3321 if (atomic_read(&cachep->memcg_params->nr_pages) == 0)
3322 return;
3323 } else
1f458cbf
GC
3324 kmem_cache_destroy(cachep);
3325}
3326
3327void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
3328{
3329 if (!cachep->memcg_params->dead)
3330 return;
3331
22933152
GC
3332 /*
3333 * There are many ways in which we can get here.
3334 *
3335 * We can get to a memory-pressure situation while the delayed work is
3336 * still pending to run. The vmscan shrinkers can then release all
3337 * cache memory and get us to destruction. If this is the case, we'll
3338 * be executed twice, which is a bug (the second time will execute over
3339 * bogus data). In this case, cancelling the work should be fine.
3340 *
3341 * But we can also get here from the worker itself, if
3342 * kmem_cache_shrink is enough to shake all the remaining objects and
3343 * get the page count to 0. In this case, we'll deadlock if we try to
3344 * cancel the work (the worker runs with an internal lock held, which
3345 * is the same lock we would hold for cancel_work_sync().)
3346 *
3347 * Since we can't possibly know who got us here, just refrain from
3348 * running if there is already work pending
3349 */
3350 if (work_pending(&cachep->memcg_params->destroy))
3351 return;
1f458cbf
GC
3352 /*
3353 * We have to defer the actual destroying to a workqueue, because
3354 * we might currently be in a context that cannot sleep.
3355 */
3356 schedule_work(&cachep->memcg_params->destroy);
3357}
3358
d9c10ddd
MH
3359/*
3360 * This lock protects updaters, not readers. We want readers to be as fast as
3361 * they can, and they will either see NULL or a valid cache value. Our model
3362 * allows them to see NULL, in which case the root memcg will be selected.
3363 *
3364 * We need this lock because multiple allocations to the same cache may span
3365 * more than one worker. Only one of them can create the cache.
3366 */
3367static DEFINE_MUTEX(memcg_cache_mutex);
d7f25f8a 3368
d9c10ddd
MH
3369/*
3370 * Called with memcg_cache_mutex held
3371 */
d7f25f8a
GC
3372static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
3373 struct kmem_cache *s)
3374{
d7f25f8a 3375 struct kmem_cache *new;
d9c10ddd 3376 static char *tmp_name = NULL;
d7f25f8a 3377
d9c10ddd
MH
3378 lockdep_assert_held(&memcg_cache_mutex);
3379
3380 /*
3381 * kmem_cache_create_memcg duplicates the given name, and
3382 * cgroup_name(), which is needed to build it, requires RCU context.
3383 * This static temporary buffer is used to avoid a pointless
3384 * short-lived allocation.
3385 */
3386 if (!tmp_name) {
3387 tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
3388 if (!tmp_name)
3389 return NULL;
3390 }
3391
3392 rcu_read_lock();
3393 snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
3394 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
3395 rcu_read_unlock();
d7f25f8a 3396
d9c10ddd 3397 new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
943a451a 3398 (s->flags & ~SLAB_PANIC), s->ctor, s);
d7f25f8a 3399
d79923fa
GC
3400 if (new)
3401 new->allocflags |= __GFP_KMEMCG;
3402
d7f25f8a
GC
3403 return new;
3404}
3405
d7f25f8a
GC
3406static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
3407 struct kmem_cache *cachep)
3408{
3409 struct kmem_cache *new_cachep;
3410 int idx;
3411
3412 BUG_ON(!memcg_can_account_kmem(memcg));
3413
3414 idx = memcg_cache_id(memcg);
3415
3416 mutex_lock(&memcg_cache_mutex);
3417 new_cachep = cachep->memcg_params->memcg_caches[idx];
20f05310
LZ
3418 if (new_cachep) {
3419 css_put(&memcg->css);
d7f25f8a 3420 goto out;
20f05310 3421 }
d7f25f8a
GC
3422
3423 new_cachep = kmem_cache_dup(memcg, cachep);
d7f25f8a
GC
3424 if (new_cachep == NULL) {
3425 new_cachep = cachep;
20f05310 3426 css_put(&memcg->css);
d7f25f8a
GC
3427 goto out;
3428 }
3429
1f458cbf 3430 atomic_set(&new_cachep->memcg_params->nr_pages , 0);
d7f25f8a
GC
3431
3432 cachep->memcg_params->memcg_caches[idx] = new_cachep;
3433 /*
3434 * The readers won't take the lock, so make sure everybody sees the updated
3435 * value and won't queue the cache for creation again for no reason.
3436 */
3437 wmb();
3438out:
3439 mutex_unlock(&memcg_cache_mutex);
3440 return new_cachep;
3441}
3442
7cf27982
GC
3443void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
3444{
3445 struct kmem_cache *c;
3446 int i;
3447
3448 if (!s->memcg_params)
3449 return;
3450 if (!s->memcg_params->is_root_cache)
3451 return;
3452
3453 /*
3454 * If the cache is being destroyed, we trust that there is no one else
3455 * requesting objects from it. Even if there are, the sanity checks in
3456 * kmem_cache_destroy should catch this ill case.
3457 *
3458 * Still, we don't want anyone else freeing memcg_caches under our
3459 * noses, which can happen if a new memcg comes to life. As usual,
3460 * we'll take the set_limit_mutex to protect ourselves against this.
3461 */
3462 mutex_lock(&set_limit_mutex);
3463 for (i = 0; i < memcg_limited_groups_array_size; i++) {
3464 c = s->memcg_params->memcg_caches[i];
3465 if (!c)
3466 continue;
3467
3468 /*
3469 * We will now manually delete the caches, so to avoid races
3470 * we need to cancel all pending destruction workers and
3471 * proceed with destruction ourselves.
3472 *
3473 * kmem_cache_destroy() will call kmem_cache_shrink internally,
3474 * and that could spawn the workers again: it is likely that
3475 * the cache still has active pages until this very moment.
3476 * This would lead us back to mem_cgroup_destroy_cache.
3477 *
3478 * But that will not execute at all if the "dead" flag is not
3479 * set, so flip it down to guarantee we are in control.
3480 */
3481 c->memcg_params->dead = false;
22933152 3482 cancel_work_sync(&c->memcg_params->destroy);
7cf27982
GC
3483 kmem_cache_destroy(c);
3484 }
3485 mutex_unlock(&set_limit_mutex);
3486}
3487
d7f25f8a
GC
3488struct create_work {
3489 struct mem_cgroup *memcg;
3490 struct kmem_cache *cachep;
3491 struct work_struct work;
3492};
3493
1f458cbf
GC
3494static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3495{
3496 struct kmem_cache *cachep;
3497 struct memcg_cache_params *params;
3498
3499 if (!memcg_kmem_is_active(memcg))
3500 return;
3501
3502 mutex_lock(&memcg->slab_caches_mutex);
3503 list_for_each_entry(params, &memcg->memcg_slab_caches, list) {
3504 cachep = memcg_params_to_cache(params);
3505 cachep->memcg_params->dead = true;
1f458cbf
GC
3506 schedule_work(&cachep->memcg_params->destroy);
3507 }
3508 mutex_unlock(&memcg->slab_caches_mutex);
3509}
3510
d7f25f8a
GC
3511static void memcg_create_cache_work_func(struct work_struct *w)
3512{
3513 struct create_work *cw;
3514
3515 cw = container_of(w, struct create_work, work);
3516 memcg_create_kmem_cache(cw->memcg, cw->cachep);
d7f25f8a
GC
3517 kfree(cw);
3518}
3519
3520/*
3521 * Enqueue the creation of a per-memcg kmem_cache.
d7f25f8a 3522 */
0e9d92f2
GC
3523static void __memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3524 struct kmem_cache *cachep)
d7f25f8a
GC
3525{
3526 struct create_work *cw;
3527
3528 cw = kmalloc(sizeof(struct create_work), GFP_NOWAIT);
ca0dde97
LZ
3529 if (cw == NULL) {
3530 css_put(&memcg->css);
d7f25f8a
GC
3531 return;
3532 }
3533
3534 cw->memcg = memcg;
3535 cw->cachep = cachep;
3536
3537 INIT_WORK(&cw->work, memcg_create_cache_work_func);
3538 schedule_work(&cw->work);
3539}
3540
0e9d92f2
GC
3541static void memcg_create_cache_enqueue(struct mem_cgroup *memcg,
3542 struct kmem_cache *cachep)
3543{
3544 /*
3545 * We need to stop accounting when we kmalloc, because if the
3546 * corresponding kmalloc cache is not yet created, the first allocation
3547 * in __memcg_create_cache_enqueue will recurse.
3548 *
3549 * However, it is better to enclose the whole function. Depending on
3550 * the debugging options enabled, INIT_WORK(), for instance, can
3551 * trigger an allocation. This too, will make us recurse. Because at
3552 * this point we can't allow ourselves back into memcg_kmem_get_cache,
3553 * the safest choice is to do it like this, wrapping the whole function.
3554 */
3555 memcg_stop_kmem_account();
3556 __memcg_create_cache_enqueue(memcg, cachep);
3557 memcg_resume_kmem_account();
3558}
d7f25f8a
GC
3559/*
3560 * Return the kmem_cache we're supposed to use for a slab allocation.
3561 * We try to use the current memcg's version of the cache.
3562 *
3563 * If the cache does not exist yet, and we are the first user of it,
3564 * we either create it immediately, if possible, or create it asynchronously
3565 * in a workqueue.
3566 * In the latter case, we will let the current allocation go through with
3567 * the original cache.
3568 *
3569 * Can't be called in interrupt context or from kernel threads.
3570 * This function needs to be called with rcu_read_lock() held.
3571 */
3572struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
3573 gfp_t gfp)
3574{
3575 struct mem_cgroup *memcg;
3576 int idx;
3577
3578 VM_BUG_ON(!cachep->memcg_params);
3579 VM_BUG_ON(!cachep->memcg_params->is_root_cache);
3580
0e9d92f2
GC
3581 if (!current->mm || current->memcg_kmem_skip_account)
3582 return cachep;
3583
d7f25f8a
GC
3584 rcu_read_lock();
3585 memcg = mem_cgroup_from_task(rcu_dereference(current->mm->owner));
d7f25f8a
GC
3586
3587 if (!memcg_can_account_kmem(memcg))
ca0dde97 3588 goto out;
d7f25f8a
GC
3589
3590 idx = memcg_cache_id(memcg);
3591
3592 /*
3593 * barrier to make sure we're always seeing the up-to-date value. The
3594 * code updating memcg_caches will issue a write barrier to match this.
3595 */
3596 read_barrier_depends();
ca0dde97
LZ
3597 if (likely(cachep->memcg_params->memcg_caches[idx])) {
3598 cachep = cachep->memcg_params->memcg_caches[idx];
3599 goto out;
d7f25f8a
GC
3600 }
3601
ca0dde97
LZ
3602 /* The corresponding put will be done in the workqueue. */
3603 if (!css_tryget(&memcg->css))
3604 goto out;
3605 rcu_read_unlock();
3606
3607 /*
3608 * If we are in a safe context (can wait, and not in interrupt
3609 * context), we could be predictable and return right away.
3610 * This would guarantee that the allocation being performed
3611 * already belongs in the new cache.
3612 *
3613 * However, there are some clashes that can arise from locking.
3614 * For instance, because we acquire the slab_mutex while doing
3615 * kmem_cache_dup, this means no further allocation could happen
3616 * with the slab_mutex held.
3617 *
3618 * Also, because cache creation issues get_online_cpus(), this
3619 * creates a lock chain: memcg_slab_mutex -> cpu_hotplug_mutex,
3620 * that ends up reversed during cpu hotplug. (cpuset allocates
3621 * a bunch of GFP_KERNEL memory during cpuup). Due to all that,
3622 * it is better to defer everything.
3623 */
3624 memcg_create_cache_enqueue(memcg, cachep);
3625 return cachep;
3626out:
3627 rcu_read_unlock();
3628 return cachep;
d7f25f8a
GC
3629}
3630EXPORT_SYMBOL(__memcg_kmem_get_cache);
3631
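/*
 * Illustrative sketch (not part of the original file): roughly how a slab
 * allocator entry point is expected to route an allocation through the
 * per-memcg cache selected above, via the memcg_kmem_get_cache() wrapper.
 * example_slab_alloc() and slab_alloc_node() are hypothetical stand-ins for
 * allocator internals.
 */
#if 0
static __always_inline void *example_slab_alloc(struct kmem_cache *cachep,
						gfp_t gfp, int node)
{
	/* Returns the root cache unchanged when accounting does not apply. */
	cachep = memcg_kmem_get_cache(cachep, gfp);
	return slab_alloc_node(cachep, gfp, node);
}
#endif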
7ae1e1d0
GC
3632/*
3633 * We need to verify if the allocation against current->mm->owner's memcg is
3634 * possible for the given order. But the page is not allocated yet, so we'll
3635 * need a further commit step to do the final arrangements.
3636 *
3637 * It is possible for the task to switch cgroups in the meantime, so at
3638 * commit time, we can't rely on task conversion any longer. We'll then use
3639 * the handle argument to return to the caller which cgroup we should commit
3640 * against. We could also return the memcg directly and avoid the pointer
3641 * passing, but a boolean return value gives better semantics considering
3642 * the compiled-out case as well.
3643 *
3644 * Returning true means the allocation is possible.
3645 */
3646bool
3647__memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3648{
3649 struct mem_cgroup *memcg;
3650 int ret;
3651
3652 *_memcg = NULL;
6d42c232
GC
3653
3654 /*
3655 * Disabling accounting is only relevant for some specific memcg
3656 * internal allocations. Therefore we would initially not have such a
3657 * check here, since direct calls to the page allocator that are marked
3658 * with GFP_KMEMCG only happen outside memcg core. We are mostly
3659 * concerned with cache allocations, and by having this test at
3660 * memcg_kmem_get_cache, we are already able to relay the allocation to
3661 * the root cache and bypass the memcg cache altogether.
3662 *
3663 * There is one exception, though: the SLUB allocator does not create
3664 * large order caches, but rather services large kmallocs directly from
3665 * the page allocator. Therefore, the following sequence when backed by
3666 * the SLUB allocator:
3667 *
3668 * memcg_stop_kmem_account();
3669 * kmalloc(<large_number>)
3670 * memcg_resume_kmem_account();
3671 *
3672 * would effectively ignore the fact that we should skip accounting,
3673 * since it will drive us directly to this function without passing
3674 * through the cache selector memcg_kmem_get_cache. Such large
3675 * allocations are extremely rare but can happen, for instance, for the
3676 * cache arrays. We bring this test here.
3677 */
3678 if (!current->mm || current->memcg_kmem_skip_account)
3679 return true;
3680
7ae1e1d0
GC
3681 memcg = try_get_mem_cgroup_from_mm(current->mm);
3682
3683 /*
3684 * Very rare case described in mem_cgroup_from_task. Unfortunately there
3685 * isn't much we can do without complicating this too much, and it would
3686 * be gfp-dependent anyway. Just let it go.
3687 */
3688 if (unlikely(!memcg))
3689 return true;
3690
3691 if (!memcg_can_account_kmem(memcg)) {
3692 css_put(&memcg->css);
3693 return true;
3694 }
3695
7ae1e1d0
GC
3696 ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order);
3697 if (!ret)
3698 *_memcg = memcg;
7ae1e1d0
GC
3699
3700 css_put(&memcg->css);
3701 return (ret == 0);
3702}
3703
3704void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
3705 int order)
3706{
3707 struct page_cgroup *pc;
3708
3709 VM_BUG_ON(mem_cgroup_is_root(memcg));
3710
3711 /* The page allocation failed. Revert */
3712 if (!page) {
3713 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
7ae1e1d0
GC
3714 return;
3715 }
3716
3717 pc = lookup_page_cgroup(page);
3718 lock_page_cgroup(pc);
3719 pc->mem_cgroup = memcg;
3720 SetPageCgroupUsed(pc);
3721 unlock_page_cgroup(pc);
3722}
3723
3724void __memcg_kmem_uncharge_pages(struct page *page, int order)
3725{
3726 struct mem_cgroup *memcg = NULL;
3727 struct page_cgroup *pc;
3728
3729
3730 pc = lookup_page_cgroup(page);
3731 /*
3732 * Fast unlocked return. Theoretically might have changed, have to
3733 * check again after locking.
3734 */
3735 if (!PageCgroupUsed(pc))
3736 return;
3737
3738 lock_page_cgroup(pc);
3739 if (PageCgroupUsed(pc)) {
3740 memcg = pc->mem_cgroup;
3741 ClearPageCgroupUsed(pc);
3742 }
3743 unlock_page_cgroup(pc);
3744
3745 /*
3746 * We trust that this is a valid (accounted) allocation only if there is
3747 * a memcg associated with the page.
3748 */
3749 if (!memcg)
3750 return;
3751
3752 VM_BUG_ON(mem_cgroup_is_root(memcg));
3753 memcg_uncharge_kmem(memcg, PAGE_SIZE << order);
7ae1e1d0 3754}
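/*
 * Illustrative sketch (not part of the original file): the charge ->
 * allocate -> commit protocol described above, roughly as a page allocator
 * caller would drive it through the memcg_kmem_newpage_charge() and
 * memcg_kmem_commit_charge() wrappers. example_alloc_accounted_pages() is a
 * hypothetical helper.
 */
#if 0
static struct page *example_alloc_accounted_pages(gfp_t gfp, unsigned int order)
{
	struct mem_cgroup *memcg = NULL;
	struct page *page;

	/* Reserve the charge before the page exists... */
	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
		return NULL;

	page = alloc_pages(gfp, order);

	/* ...then bind it to the page, or revert it if allocation failed. */
	memcg_kmem_commit_charge(page, memcg, order);
	return page;
}
#endif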
1f458cbf
GC
3755#else
3756static inline void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
3757{
3758}
7ae1e1d0
GC
3759#endif /* CONFIG_MEMCG_KMEM */
3760
ca3e0214
KH
3761#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3762
a0db00fc 3763#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
ca3e0214
KH
3764/*
3765 * Because tail pages are not marked as "used", set them. We're under
e94c8a9c
KH
3766 * zone->lru_lock, 'splitting on pmd' and compound_lock.
3767 * charge/uncharge will never happen and move_account() is done under
3768 * compound_lock(), so we don't have to take care of races.
ca3e0214 3769 */
e94c8a9c 3770void mem_cgroup_split_huge_fixup(struct page *head)
ca3e0214
KH
3771{
3772 struct page_cgroup *head_pc = lookup_page_cgroup(head);
e94c8a9c 3773 struct page_cgroup *pc;
b070e65c 3774 struct mem_cgroup *memcg;
e94c8a9c 3775 int i;
ca3e0214 3776
3d37c4a9
KH
3777 if (mem_cgroup_disabled())
3778 return;
b070e65c
DR
3779
3780 memcg = head_pc->mem_cgroup;
e94c8a9c
KH
3781 for (i = 1; i < HPAGE_PMD_NR; i++) {
3782 pc = head_pc + i;
b070e65c 3783 pc->mem_cgroup = memcg;
e94c8a9c 3784 smp_wmb();/* see __commit_charge() */
e94c8a9c
KH
3785 pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
3786 }
b070e65c
DR
3787 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
3788 HPAGE_PMD_NR);
ca3e0214 3789}
12d27107 3790#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
ca3e0214 3791
f817ed48 3792/**
de3638d9 3793 * mem_cgroup_move_account - move account of the page
5564e88b 3794 * @page: the page
7ec99d62 3795 * @nr_pages: number of regular pages (>1 for huge pages)
f817ed48
KH
3796 * @pc: page_cgroup of the page.
3797 * @from: mem_cgroup which the page is moved from.
3798 * @to: mem_cgroup which the page is moved to. @from != @to.
3799 *
3800 * The caller must confirm the following.
08e552c6 3801 * - page is not on LRU (isolate_page() is useful.)
7ec99d62 3802 * - compound_lock is held when nr_pages > 1
f817ed48 3803 *
2f3479b1
KH
3804 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
3805 * from old cgroup.
f817ed48 3806 */
7ec99d62
JW
3807static int mem_cgroup_move_account(struct page *page,
3808 unsigned int nr_pages,
3809 struct page_cgroup *pc,
3810 struct mem_cgroup *from,
2f3479b1 3811 struct mem_cgroup *to)
f817ed48 3812{
de3638d9
JW
3813 unsigned long flags;
3814 int ret;
b2402857 3815 bool anon = PageAnon(page);
987eba66 3816
f817ed48 3817 VM_BUG_ON(from == to);
5564e88b 3818 VM_BUG_ON(PageLRU(page));
de3638d9
JW
3819 /*
3820 * The page is isolated from LRU. So, collapse function
3821 * will not handle this page. But page splitting can happen.
3822 * Do this check under compound_page_lock(). The caller should
3823 * hold it.
3824 */
3825 ret = -EBUSY;
7ec99d62 3826 if (nr_pages > 1 && !PageTransHuge(page))
de3638d9
JW
3827 goto out;
3828
3829 lock_page_cgroup(pc);
3830
3831 ret = -EINVAL;
3832 if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
3833 goto unlock;
3834
312734c0 3835 move_lock_mem_cgroup(from, &flags);
f817ed48 3836
2ff76f11 3837 if (!anon && page_mapped(page)) {
c62b1a3b
KH
3838 /* Update mapped_file data for mem_cgroup */
3839 preempt_disable();
3840 __this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
3841 __this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
3842 preempt_enable();
d69b042f 3843 }
b070e65c 3844 mem_cgroup_charge_statistics(from, page, anon, -nr_pages);
d69b042f 3845
854ffa8d 3846 /* caller should have done css_get */
08e552c6 3847 pc->mem_cgroup = to;
b070e65c 3848 mem_cgroup_charge_statistics(to, page, anon, nr_pages);
312734c0 3849 move_unlock_mem_cgroup(from, &flags);
de3638d9
JW
3850 ret = 0;
3851unlock:
57f9fd7d 3852 unlock_page_cgroup(pc);
d2265e6f
KH
3853 /*
3854 * check events
3855 */
5564e88b
JW
3856 memcg_check_events(to, page);
3857 memcg_check_events(from, page);
de3638d9 3858out:
f817ed48
KH
3859 return ret;
3860}
3861
2ef37d3f
MH
3862/**
3863 * mem_cgroup_move_parent - moves page to the parent group
3864 * @page: the page to move
3865 * @pc: page_cgroup of the page
3866 * @child: page's cgroup
3867 *
3868 * move charges to its parent or the root cgroup if the group has no
3869 * parent (aka use_hierarchy==0).
3870 * Although this might fail (get_page_unless_zero, isolate_lru_page or
3871 * mem_cgroup_move_account fails) the failure is always temporary and
3872 * it signals a race with a page removal/uncharge or migration. In the
3873 * first case the page is on the way out and it will vanish from the LRU
3874 * on the next attempt and the call should be retried later.
3875 * Isolation from the LRU fails only if the page has been isolated from
3876 * the LRU since we looked at it and that usually means either global
3877 * reclaim or migration going on. The page will either get back to the
3878 * LRU or vanish.
3879 * Finally mem_cgroup_move_account fails only if the page got uncharged
3880 * (!PageCgroupUsed) or moved to a different group. The page will
3881 * disappear in the next attempt.
f817ed48 3882 */
5564e88b
JW
3883static int mem_cgroup_move_parent(struct page *page,
3884 struct page_cgroup *pc,
6068bf01 3885 struct mem_cgroup *child)
f817ed48 3886{
f817ed48 3887 struct mem_cgroup *parent;
7ec99d62 3888 unsigned int nr_pages;
4be4489f 3889 unsigned long uninitialized_var(flags);
f817ed48
KH
3890 int ret;
3891
d8423011 3892 VM_BUG_ON(mem_cgroup_is_root(child));
f817ed48 3893
57f9fd7d
DN
3894 ret = -EBUSY;
3895 if (!get_page_unless_zero(page))
3896 goto out;
3897 if (isolate_lru_page(page))
3898 goto put;
52dbb905 3899
7ec99d62 3900 nr_pages = hpage_nr_pages(page);
08e552c6 3901
cc926f78
KH
3902 parent = parent_mem_cgroup(child);
3903 /*
3904 * If no parent, move charges to root cgroup.
3905 */
3906 if (!parent)
3907 parent = root_mem_cgroup;
f817ed48 3908
2ef37d3f
MH
3909 if (nr_pages > 1) {
3910 VM_BUG_ON(!PageTransHuge(page));
987eba66 3911 flags = compound_lock_irqsave(page);
2ef37d3f 3912 }
987eba66 3913
cc926f78 3914 ret = mem_cgroup_move_account(page, nr_pages,
2f3479b1 3915 pc, child, parent);
cc926f78
KH
3916 if (!ret)
3917 __mem_cgroup_cancel_local_charge(child, nr_pages);
8dba474f 3918
7ec99d62 3919 if (nr_pages > 1)
987eba66 3920 compound_unlock_irqrestore(page, flags);
08e552c6 3921 putback_lru_page(page);
57f9fd7d 3922put:
40d58138 3923 put_page(page);
57f9fd7d 3924out:
f817ed48
KH
3925 return ret;
3926}
3927
7a81b88c
KH
3928/*
3929 * Charge the memory controller for page usage.
3930 * Return
3931 * 0 if the charge was successful
3932 * < 0 if the cgroup is over its limit
3933 */
3934static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
73045c47 3935 gfp_t gfp_mask, enum charge_type ctype)
7a81b88c 3936{
c0ff4b85 3937 struct mem_cgroup *memcg = NULL;
7ec99d62 3938 unsigned int nr_pages = 1;
8493ae43 3939 bool oom = true;
7a81b88c 3940 int ret;
ec168510 3941
37c2ac78 3942 if (PageTransHuge(page)) {
7ec99d62 3943 nr_pages <<= compound_order(page);
37c2ac78 3944 VM_BUG_ON(!PageTransHuge(page));
8493ae43
JW
3945 /*
3946 * Never OOM-kill a process for a huge page. The
3947 * fault handler will fall back to regular pages.
3948 */
3949 oom = false;
37c2ac78 3950 }
7a81b88c 3951
c0ff4b85 3952 ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
38c5d72f 3953 if (ret == -ENOMEM)
7a81b88c 3954 return ret;
ce587e65 3955 __mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false);
8a9f3ccd 3956 return 0;
8a9f3ccd
BS
3957}
3958
7a81b88c
KH
3959int mem_cgroup_newpage_charge(struct page *page,
3960 struct mm_struct *mm, gfp_t gfp_mask)
217bc319 3961{
f8d66542 3962 if (mem_cgroup_disabled())
cede86ac 3963 return 0;
7a0524cf
JW
3964 VM_BUG_ON(page_mapped(page));
3965 VM_BUG_ON(page->mapping && !PageAnon(page));
3966 VM_BUG_ON(!mm);
217bc319 3967 return mem_cgroup_charge_common(page, mm, gfp_mask,
41326c17 3968 MEM_CGROUP_CHARGE_TYPE_ANON);
217bc319
KH
3969}
3970
54595fe2
KH
3971/*
3972 * During swap-in (try_charge -> commit or cancel), the page is locked.
3973 * And when try_charge() successfully returns, one refcnt to memcg without
21ae2956 3974 * struct page_cgroup is acquired. This refcnt will be consumed by
54595fe2
KH
3975 * "commit()" or removed by "cancel()"
3976 */
0435a2fd
JW
3977static int __mem_cgroup_try_charge_swapin(struct mm_struct *mm,
3978 struct page *page,
3979 gfp_t mask,
3980 struct mem_cgroup **memcgp)
8c7c6e34 3981{
c0ff4b85 3982 struct mem_cgroup *memcg;
90deb788 3983 struct page_cgroup *pc;
54595fe2 3984 int ret;
8c7c6e34 3985
90deb788
JW
3986 pc = lookup_page_cgroup(page);
3987 /*
3988 * Every swap fault against a single page tries to charge the
3989 * page, bail as early as possible. shmem_unuse() encounters
3990 * already charged pages, too. The USED bit is protected by
3991 * the page lock, which serializes swap cache removal, which
3992 * in turn serializes uncharging.
3993 */
3994 if (PageCgroupUsed(pc))
3995 return 0;
8c7c6e34
KH
3996 if (!do_swap_account)
3997 goto charge_cur_mm;
c0ff4b85
R
3998 memcg = try_get_mem_cgroup_from_page(page);
3999 if (!memcg)
54595fe2 4000 goto charge_cur_mm;
72835c86
JW
4001 *memcgp = memcg;
4002 ret = __mem_cgroup_try_charge(NULL, mask, 1, memcgp, true);
c0ff4b85 4003 css_put(&memcg->css);
38c5d72f
KH
4004 if (ret == -EINTR)
4005 ret = 0;
54595fe2 4006 return ret;
8c7c6e34 4007charge_cur_mm:
38c5d72f
KH
4008 ret = __mem_cgroup_try_charge(mm, mask, 1, memcgp, true);
4009 if (ret == -EINTR)
4010 ret = 0;
4011 return ret;
8c7c6e34
KH
4012}
4013
0435a2fd
JW
4014int mem_cgroup_try_charge_swapin(struct mm_struct *mm, struct page *page,
4015 gfp_t gfp_mask, struct mem_cgroup **memcgp)
4016{
4017 *memcgp = NULL;
4018 if (mem_cgroup_disabled())
4019 return 0;
bdf4f4d2
JW
4020 /*
4021 * A racing thread's fault, or swapoff, may have already
4022 * updated the pte, and even removed page from swap cache: in
4023 * those cases unuse_pte()'s pte_same() test will fail; but
4024 * there's also a KSM case which does need to charge the page.
4025 */
4026 if (!PageSwapCache(page)) {
4027 int ret;
4028
4029 ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, memcgp, true);
4030 if (ret == -EINTR)
4031 ret = 0;
4032 return ret;
4033 }
0435a2fd
JW
4034 return __mem_cgroup_try_charge_swapin(mm, page, gfp_mask, memcgp);
4035}
4036
827a03d2
JW
4037void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
4038{
4039 if (mem_cgroup_disabled())
4040 return;
4041 if (!memcg)
4042 return;
4043 __mem_cgroup_cancel_charge(memcg, 1);
4044}
4045
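/*
 * Illustrative sketch (not part of the original file): the swap-in
 * try_charge -> commit/cancel protocol described above, roughly as a fault
 * path would use it. example_swapin_fault() and example_map_page() are
 * hypothetical.
 */
#if 0
static int example_swapin_fault(struct mm_struct *mm, struct page *page,
				gfp_t gfp_mask)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &memcg))
		return -ENOMEM;

	if (example_map_page(mm, page)) {	/* mapping failed: undo the charge */
		mem_cgroup_cancel_charge_swapin(memcg);
		return -EFAULT;
	}

	mem_cgroup_commit_charge_swapin(page, memcg);
	return 0;
}
#endif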
83aae4c7 4046static void
72835c86 4047__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
83aae4c7 4048 enum charge_type ctype)
7a81b88c 4049{
f8d66542 4050 if (mem_cgroup_disabled())
7a81b88c 4051 return;
72835c86 4052 if (!memcg)
7a81b88c 4053 return;
5a6475a4 4054
ce587e65 4055 __mem_cgroup_commit_charge(memcg, page, 1, ctype, true);
8c7c6e34
KH
4056 /*
4057 * Now the swap is in memory. This means this page may be
4058 * counted both as mem and swap: a double count.
03f3c433
KH
4059 * Fix it by uncharging from memsw. Basically, this SwapCache is stable
4060 * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
4061 * may call delete_from_swap_cache() before we reach here.
8c7c6e34 4062 */
03f3c433 4063 if (do_swap_account && PageSwapCache(page)) {
8c7c6e34 4064 swp_entry_t ent = {.val = page_private(page)};
86493009 4065 mem_cgroup_uncharge_swap(ent);
8c7c6e34 4066 }
7a81b88c
KH
4067}
4068
72835c86
JW
4069void mem_cgroup_commit_charge_swapin(struct page *page,
4070 struct mem_cgroup *memcg)
83aae4c7 4071{
72835c86 4072 __mem_cgroup_commit_charge_swapin(page, memcg,
41326c17 4073 MEM_CGROUP_CHARGE_TYPE_ANON);
83aae4c7
DN
4074}
4075
827a03d2
JW
4076int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
4077 gfp_t gfp_mask)
7a81b88c 4078{
827a03d2
JW
4079 struct mem_cgroup *memcg = NULL;
4080 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
4081 int ret;
4082
f8d66542 4083 if (mem_cgroup_disabled())
827a03d2
JW
4084 return 0;
4085 if (PageCompound(page))
4086 return 0;
4087
827a03d2
JW
4088 if (!PageSwapCache(page))
4089 ret = mem_cgroup_charge_common(page, mm, gfp_mask, type);
4090 else { /* page is swapcache/shmem */
0435a2fd
JW
4091 ret = __mem_cgroup_try_charge_swapin(mm, page,
4092 gfp_mask, &memcg);
827a03d2
JW
4093 if (!ret)
4094 __mem_cgroup_commit_charge_swapin(page, memcg, type);
4095 }
4096 return ret;
7a81b88c
KH
4097}
4098
c0ff4b85 4099static void mem_cgroup_do_uncharge(struct mem_cgroup *memcg,
7ec99d62
JW
4100 unsigned int nr_pages,
4101 const enum charge_type ctype)
569b846d
KH
4102{
4103 struct memcg_batch_info *batch = NULL;
4104 bool uncharge_memsw = true;
7ec99d62 4105
569b846d
KH
4106 /* If swapout, usage of swap doesn't decrease */
4107 if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
4108 uncharge_memsw = false;
569b846d
KH
4109
4110 batch = &current->memcg_batch;
4111 /*
4112 * Usually, we do css_get() when we remember the memcg pointer.
4113 * But in this case, we keep res->usage until end of a series of
4114 * uncharges. Then, it's ok to ignore memcg's refcnt.
4115 */
4116 if (!batch->memcg)
c0ff4b85 4117 batch->memcg = memcg;
3c11ecf4
KH
4118 /*
4119 * do_batch > 0 when unmapping pages or inode invalidate/truncate.
25985edc 4120 * In those cases, all pages freed continuously can be expected to be in
3c11ecf4
KH
4121 * the same cgroup and we have a chance to coalesce uncharges.
4122 * But we do uncharge one by one if this is killed by OOM(TIF_MEMDIE)
4123 * because we want to do uncharge as soon as possible.
4124 */
4125
4126 if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
4127 goto direct_uncharge;
4128
7ec99d62 4129 if (nr_pages > 1)
ec168510
AA
4130 goto direct_uncharge;
4131
569b846d
KH
4132 /*
4133 * In the typical case, batch->memcg == mem. This means we can
4134 * merge a series of uncharges to an uncharge of res_counter.
4135 * If not, we uncharge res_counter one by one.
4136 */
c0ff4b85 4137 if (batch->memcg != memcg)
569b846d
KH
4138 goto direct_uncharge;
4139 /* remember freed charge and uncharge it later */
7ffd4ca7 4140 batch->nr_pages++;
569b846d 4141 if (uncharge_memsw)
7ffd4ca7 4142 batch->memsw_nr_pages++;
569b846d
KH
4143 return;
4144direct_uncharge:
c0ff4b85 4145 res_counter_uncharge(&memcg->res, nr_pages * PAGE_SIZE);
569b846d 4146 if (uncharge_memsw)
c0ff4b85
R
4147 res_counter_uncharge(&memcg->memsw, nr_pages * PAGE_SIZE);
4148 if (unlikely(batch->memcg != memcg))
4149 memcg_oom_recover(memcg);
569b846d 4150}
7a81b88c 4151
8a9f3ccd 4152/*
69029cd5 4153 * uncharge if !page_mapped(page)
8a9f3ccd 4154 */
8c7c6e34 4155static struct mem_cgroup *
0030f535
JW
4156__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
4157 bool end_migration)
8a9f3ccd 4158{
c0ff4b85 4159 struct mem_cgroup *memcg = NULL;
7ec99d62
JW
4160 unsigned int nr_pages = 1;
4161 struct page_cgroup *pc;
b2402857 4162 bool anon;
8a9f3ccd 4163
f8d66542 4164 if (mem_cgroup_disabled())
8c7c6e34 4165 return NULL;
4077960e 4166
37c2ac78 4167 if (PageTransHuge(page)) {
7ec99d62 4168 nr_pages <<= compound_order(page);
37c2ac78
AA
4169 VM_BUG_ON(!PageTransHuge(page));
4170 }
8697d331 4171 /*
3c541e14 4172 * Check if our page_cgroup is valid
8697d331 4173 */
52d4b9ac 4174 pc = lookup_page_cgroup(page);
cfa44946 4175 if (unlikely(!PageCgroupUsed(pc)))
8c7c6e34 4176 return NULL;
b9c565d5 4177
52d4b9ac 4178 lock_page_cgroup(pc);
d13d1443 4179
c0ff4b85 4180 memcg = pc->mem_cgroup;
8c7c6e34 4181
d13d1443
KH
4182 if (!PageCgroupUsed(pc))
4183 goto unlock_out;
4184
b2402857
KH
4185 anon = PageAnon(page);
4186
d13d1443 4187 switch (ctype) {
41326c17 4188 case MEM_CGROUP_CHARGE_TYPE_ANON:
2ff76f11
KH
4189 /*
4190 * Generally PageAnon tells if it's the anon statistics to be
4191 * updated; but sometimes e.g. mem_cgroup_uncharge_page() is
4192 * used before page reached the stage of being marked PageAnon.
4193 */
b2402857
KH
4194 anon = true;
4195 /* fallthrough */
8a9478ca 4196 case MEM_CGROUP_CHARGE_TYPE_DROP:
ac39cf8c 4197 /* See mem_cgroup_prepare_migration() */
0030f535
JW
4198 if (page_mapped(page))
4199 goto unlock_out;
4200 /*
4201 * Pages under migration may not be uncharged. But
4202 * end_migration() /must/ be the one uncharging the
4203 * unused post-migration page and so it has to call
4204 * here with the migration bit still set. See the
4205 * res_counter handling below.
4206 */
4207 if (!end_migration && PageCgroupMigration(pc))
d13d1443
KH
4208 goto unlock_out;
4209 break;
4210 case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
4211 if (!PageAnon(page)) { /* Shared memory */
4212 if (page->mapping && !page_is_file_cache(page))
4213 goto unlock_out;
4214 } else if (page_mapped(page)) /* Anon */
4215 goto unlock_out;
4216 break;
4217 default:
4218 break;
52d4b9ac 4219 }
d13d1443 4220
b070e65c 4221 mem_cgroup_charge_statistics(memcg, page, anon, -nr_pages);
04046e1a 4222
52d4b9ac 4223 ClearPageCgroupUsed(pc);
544122e5
KH
4224 /*
4225 * pc->mem_cgroup is not cleared here. It will be accessed when it's
4226 * freed from LRU. This is safe because an uncharged page is expected not
4227 * to be reused (freed soon). The exception is SwapCache, which is handled by
4228 * special functions.
4229 */
b9c565d5 4230
52d4b9ac 4231 unlock_page_cgroup(pc);
f75ca962 4232 /*
c0ff4b85 4233 * even after unlock, we have memcg->res.usage here and this memcg
f75ca962
KH
4234 * will never be freed.
4235 */
c0ff4b85 4236 memcg_check_events(memcg, page);
f75ca962 4237 if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
c0ff4b85
R
4238 mem_cgroup_swap_statistics(memcg, true);
4239 mem_cgroup_get(memcg);
f75ca962 4240 }
0030f535
JW
4241 /*
4242 * Migration does not charge the res_counter for the
4243 * replacement page, so leave it alone when phasing out the
4244 * page that is unused after the migration.
4245 */
4246 if (!end_migration && !mem_cgroup_is_root(memcg))
c0ff4b85 4247 mem_cgroup_do_uncharge(memcg, nr_pages, ctype);
6d12e2d8 4248
c0ff4b85 4249 return memcg;
d13d1443
KH
4250
4251unlock_out:
4252 unlock_page_cgroup(pc);
8c7c6e34 4253 return NULL;
3c541e14
BS
4254}
4255
69029cd5
KH
4256void mem_cgroup_uncharge_page(struct page *page)
4257{
52d4b9ac
KH
4258 /* early check. */
4259 if (page_mapped(page))
4260 return;
40f23a21 4261 VM_BUG_ON(page->mapping && !PageAnon(page));
28ccddf7
JW
4262 /*
4263 * If the page is in swap cache, uncharge should be deferred
4264 * to the swap path, which also properly accounts swap usage
4265 * and handles memcg lifetime.
4266 *
4267 * Note that this check is not stable and reclaim may add the
4268 * page to swap cache at any time after this. However, if the
4269 * page is not in swap cache by the time page->mapcount hits
4270 * 0, there won't be any page table references to the swap
4271 * slot, and reclaim will free it and not actually write the
4272 * page to disk.
4273 */
0c59b89c
JW
4274 if (PageSwapCache(page))
4275 return;
0030f535 4276 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
69029cd5
KH
4277}
4278
4279void mem_cgroup_uncharge_cache_page(struct page *page)
4280{
4281 VM_BUG_ON(page_mapped(page));
b7abea96 4282 VM_BUG_ON(page->mapping);
0030f535 4283 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE, false);
69029cd5
KH
4284}
4285
569b846d
KH
4286/*
4287 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
4288 * In those cases, pages are freed continuously and we can expect pages
4289 * are in the same memcg. Each of these calls itself limits the number of
4290 * pages freed at once, then uncharge_start/end() is called properly.
4291 * This may be called several (nested) times in one context.
4292 */
4293
4294void mem_cgroup_uncharge_start(void)
4295{
4296 current->memcg_batch.do_batch++;
4297 /* We can do nest. */
4298 if (current->memcg_batch.do_batch == 1) {
4299 current->memcg_batch.memcg = NULL;
7ffd4ca7
JW
4300 current->memcg_batch.nr_pages = 0;
4301 current->memcg_batch.memsw_nr_pages = 0;
569b846d
KH
4302 }
4303}
4304
4305void mem_cgroup_uncharge_end(void)
4306{
4307 struct memcg_batch_info *batch = &current->memcg_batch;
4308
4309 if (!batch->do_batch)
4310 return;
4311
4312 batch->do_batch--;
4313 if (batch->do_batch) /* If stacked, do nothing. */
4314 return;
4315
4316 if (!batch->memcg)
4317 return;
4318 /*
4319 * This "batch->memcg" is valid without any css_get/put etc...
4320 * because we hide charges behind us.
4321 */
7ffd4ca7
JW
4322 if (batch->nr_pages)
4323 res_counter_uncharge(&batch->memcg->res,
4324 batch->nr_pages * PAGE_SIZE);
4325 if (batch->memsw_nr_pages)
4326 res_counter_uncharge(&batch->memcg->memsw,
4327 batch->memsw_nr_pages * PAGE_SIZE);
3c11ecf4 4328 memcg_oom_recover(batch->memcg);
569b846d
KH
4329 /* forget this pointer (for sanity check) */
4330 batch->memcg = NULL;
4331}
4332
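/*
 * Illustrative sketch (not part of the original file): how a caller that
 * frees many pages in a row is expected to bracket the frees so the
 * uncharges above can be coalesced into a single res_counter operation.
 * example_unmap_range() is a hypothetical stand-in for unmap/truncate paths.
 */
#if 0
static void example_unmap_range(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_page(pages[i]);	/* batched while do_batch > 0 */
	mem_cgroup_uncharge_end();			/* flushes the coalesced uncharge */
}
#endif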
e767e056 4333#ifdef CONFIG_SWAP
8c7c6e34 4334/*
e767e056 4335 * called after __delete_from_swap_cache() and drops the "page" account.
8c7c6e34
KH
4336 * memcg information is recorded to swap_cgroup of "ent"
4337 */
8a9478ca
KH
4338void
4339mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
8c7c6e34
KH
4340{
4341 struct mem_cgroup *memcg;
8a9478ca
KH
4342 int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
4343
4344 if (!swapout) /* this was a swap cache but the swap is unused ! */
4345 ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
4346
0030f535 4347 memcg = __mem_cgroup_uncharge_common(page, ctype, false);
8c7c6e34 4348
f75ca962
KH
4349 /*
4350 * record memcg information, if swapout && memcg != NULL,
4351 * mem_cgroup_get() was called in uncharge().
4352 */
4353 if (do_swap_account && swapout && memcg)
a3b2d692 4354 swap_cgroup_record(ent, css_id(&memcg->css));
8c7c6e34 4355}
e767e056 4356#endif
8c7c6e34 4357
c255a458 4358#ifdef CONFIG_MEMCG_SWAP
8c7c6e34
KH
4359/*
4360 * called from swap_entry_free(). remove record in swap_cgroup and
4361 * uncharge "memsw" account.
4362 */
4363void mem_cgroup_uncharge_swap(swp_entry_t ent)
d13d1443 4364{
8c7c6e34 4365 struct mem_cgroup *memcg;
a3b2d692 4366 unsigned short id;
8c7c6e34
KH
4367
4368 if (!do_swap_account)
4369 return;
4370
a3b2d692
KH
4371 id = swap_cgroup_record(ent, 0);
4372 rcu_read_lock();
4373 memcg = mem_cgroup_lookup(id);
8c7c6e34 4374 if (memcg) {
a3b2d692
KH
4375 /*
4376 * We uncharge this because swap is freed.
4377 * This memcg can be an obsolete one. We avoid calling css_tryget().
4378 */
0c3e73e8 4379 if (!mem_cgroup_is_root(memcg))
4e649152 4380 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
0c3e73e8 4381 mem_cgroup_swap_statistics(memcg, false);
8c7c6e34
KH
4382 mem_cgroup_put(memcg);
4383 }
a3b2d692 4384 rcu_read_unlock();
d13d1443 4385}
02491447
DN
4386
4387/**
4388 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
4389 * @entry: swap entry to be moved
4390 * @from: mem_cgroup which the entry is moved from
4391 * @to: mem_cgroup which the entry is moved to
4392 *
4393 * It succeeds only when the swap_cgroup's record for this entry is the same
4394 * as the mem_cgroup's id of @from.
4395 *
4396 * Returns 0 on success, -EINVAL on failure.
4397 *
4398 * The caller must have charged to @to, IOW, called res_counter_charge() on
4399 * both res and memsw, and called css_get().
4400 */
4401static int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 4402 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
4403{
4404 unsigned short old_id, new_id;
4405
4406 old_id = css_id(&from->css);
4407 new_id = css_id(&to->css);
4408
4409 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
02491447 4410 mem_cgroup_swap_statistics(from, false);
483c30b5 4411 mem_cgroup_swap_statistics(to, true);
02491447 4412 /*
483c30b5
DN
4413 * This function is only called from task migration context now.
4414 * It postpones res_counter and refcount handling till the end
4415 * of task migration(mem_cgroup_clear_mc()) for performance
4416 * improvement. But we cannot postpone mem_cgroup_get(to)
4417 * because if the process that has been moved to @to does
4418 * swap-in, the refcount of @to might be decreased to 0.
02491447 4419 */
02491447 4420 mem_cgroup_get(to);
02491447
DN
4421 return 0;
4422 }
4423 return -EINVAL;
4424}
4425#else
4426static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
e91cbb42 4427 struct mem_cgroup *from, struct mem_cgroup *to)
02491447
DN
4428{
4429 return -EINVAL;
4430}
8c7c6e34 4431#endif
d13d1443 4432
ae41be37 4433/*
01b1ae63
KH
4434 * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
4435 * page belongs to.
ae41be37 4436 */
0030f535
JW
4437void mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
4438 struct mem_cgroup **memcgp)
ae41be37 4439{
c0ff4b85 4440 struct mem_cgroup *memcg = NULL;
b32967ff 4441 unsigned int nr_pages = 1;
7ec99d62 4442 struct page_cgroup *pc;
ac39cf8c 4443 enum charge_type ctype;
8869b8f6 4444
72835c86 4445 *memcgp = NULL;
56039efa 4446
f8d66542 4447 if (mem_cgroup_disabled())
0030f535 4448 return;
4077960e 4449
b32967ff
MG
4450 if (PageTransHuge(page))
4451 nr_pages <<= compound_order(page);
4452
52d4b9ac
KH
4453 pc = lookup_page_cgroup(page);
4454 lock_page_cgroup(pc);
4455 if (PageCgroupUsed(pc)) {
c0ff4b85
R
4456 memcg = pc->mem_cgroup;
4457 css_get(&memcg->css);
ac39cf8c 4458 /*
4459 * When migrating an anonymous page, its mapcount goes down
4460 * to 0 and uncharge() will be called. But, even if it's fully
4461 * unmapped, migration may fail and this page has to be
4462 * charged again. We set MIGRATION flag here and delay uncharge
4463 * until end_migration() is called
4464 *
4465 * Corner Case Thinking
4466 * A)
4467 * When the old page was mapped as Anon and it's unmap-and-freed
4468 * while migration was ongoing.
4469 * If unmap finds the old page, uncharge() of it will be delayed
4470 * until end_migration(). If unmap finds a new page, it's
4471 * uncharged when it makes mapcount go 1->0. If unmap code
4472 * finds swap_migration_entry, the new page will not be mapped
4473 * and end_migration() will find it(mapcount==0).
4474 *
4475 * B)
4476 * When the old page was mapped but migration fails, the kernel
4477 * remaps it. A charge for it is kept by MIGRATION flag even
4478 * if mapcount goes down to 0. We can do remap successfully
4479 * without charging it again.
4480 *
4481 * C)
4482 * The "old" page is under lock_page() until the end of
4483 * migration, so, the old page itself will not be swapped-out.
4484 * If the new page is swapped out before end_migration, our
4485 * hook to usual swap-out path will catch the event.
4486 */
4487 if (PageAnon(page))
4488 SetPageCgroupMigration(pc);
e8589cc1 4489 }
52d4b9ac 4490 unlock_page_cgroup(pc);
ac39cf8c 4491 /*
4492 * If the page is not charged at this point,
4493 * we return here.
4494 */
c0ff4b85 4495 if (!memcg)
0030f535 4496 return;
01b1ae63 4497
72835c86 4498 *memcgp = memcg;
ac39cf8c 4499 /*
4500 * We charge the new page before it's used/mapped. So, even if unlock_page()
4501 * is called before end_migration, we can catch all events on this new
4502 * page. If the new page is migrated but not remapped, its
4503 * mapcount will finally be 0 and we call uncharge in end_migration().
4504 */
ac39cf8c 4505 if (PageAnon(page))
41326c17 4506 ctype = MEM_CGROUP_CHARGE_TYPE_ANON;
ac39cf8c 4507 else
62ba7442 4508 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
0030f535
JW
4509 /*
4510 * The page is committed to the memcg, but it's not actually
4511 * charged to the res_counter since we plan on replacing the
4512 * old one and only one page is going to be left afterwards.
4513 */
b32967ff 4514 __mem_cgroup_commit_charge(memcg, newpage, nr_pages, ctype, false);
ae41be37 4515}
8869b8f6 4516
69029cd5 4517/* remove redundant charge if migration failed*/
c0ff4b85 4518void mem_cgroup_end_migration(struct mem_cgroup *memcg,
50de1dd9 4519 struct page *oldpage, struct page *newpage, bool migration_ok)
ae41be37 4520{
ac39cf8c 4521 struct page *used, *unused;
01b1ae63 4522 struct page_cgroup *pc;
b2402857 4523 bool anon;
01b1ae63 4524
c0ff4b85 4525 if (!memcg)
01b1ae63 4526 return;
b25ed609 4527
50de1dd9 4528 if (!migration_ok) {
ac39cf8c 4529 used = oldpage;
4530 unused = newpage;
01b1ae63 4531 } else {
ac39cf8c 4532 used = newpage;
01b1ae63
KH
4533 unused = oldpage;
4534 }
0030f535 4535 anon = PageAnon(used);
7d188958
JW
4536 __mem_cgroup_uncharge_common(unused,
4537 anon ? MEM_CGROUP_CHARGE_TYPE_ANON
4538 : MEM_CGROUP_CHARGE_TYPE_CACHE,
4539 true);
0030f535 4540 css_put(&memcg->css);
69029cd5 4541 /*
ac39cf8c 4542 * We disallowed uncharge of pages under migration because mapcount
4543 * of the page goes down to zero, temporarily.
4544 * Clear the flag and check whether the page should be charged.
01b1ae63 4545 */
ac39cf8c 4546 pc = lookup_page_cgroup(oldpage);
4547 lock_page_cgroup(pc);
4548 ClearPageCgroupMigration(pc);
4549 unlock_page_cgroup(pc);
ac39cf8c 4550
01b1ae63 4551 /*
ac39cf8c 4552 * If a page is a file cache, radix-tree replacement is very atomic
4553 * and we can skip this check. When it was an Anon page, its mapcount
4554 * goes down to 0. But because we added the MIGRATION flag, it's not
4555 * uncharged yet. There are several cases but the page->mapcount check
4556 * and USED bit check in mem_cgroup_uncharge_page() will do enough
4557 * check. (see prepare_charge() also)
69029cd5 4558 */
b2402857 4559 if (anon)
ac39cf8c 4560 mem_cgroup_uncharge_page(used);
ae41be37 4561}
78fb7466 4562
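/*
 * Illustrative sketch (not part of the original file): how a migration path
 * is expected to bracket the actual page copy with the prepare/end hooks
 * above. example_migrate_page() and example_copy_and_remap() are
 * hypothetical.
 */
#if 0
static int example_migrate_page(struct page *oldpage, struct page *newpage)
{
	struct mem_cgroup *memcg;
	int rc;

	mem_cgroup_prepare_migration(oldpage, newpage, &memcg);
	rc = example_copy_and_remap(oldpage, newpage);
	/* memcg may be NULL if the old page was never charged. */
	mem_cgroup_end_migration(memcg, oldpage, newpage, rc == 0);
	return rc;
}
#endif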
ab936cbc
KH
4563/*
4564 * At page cache replacement, newpage is not under any memcg but it's on
4565 * the LRU. So, this function doesn't touch res_counter but handles the LRU
4566 * in a correct way. Both pages are locked so we cannot race with uncharge.
4567 */
4568void mem_cgroup_replace_page_cache(struct page *oldpage,
4569 struct page *newpage)
4570{
bde05d1c 4571 struct mem_cgroup *memcg = NULL;
ab936cbc 4572 struct page_cgroup *pc;
ab936cbc 4573 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
ab936cbc
KH
4574
4575 if (mem_cgroup_disabled())
4576 return;
4577
4578 pc = lookup_page_cgroup(oldpage);
4579 /* fix accounting on old pages */
4580 lock_page_cgroup(pc);
bde05d1c
HD
4581 if (PageCgroupUsed(pc)) {
4582 memcg = pc->mem_cgroup;
b070e65c 4583 mem_cgroup_charge_statistics(memcg, oldpage, false, -1);
bde05d1c
HD
4584 ClearPageCgroupUsed(pc);
4585 }
ab936cbc
KH
4586 unlock_page_cgroup(pc);
4587
bde05d1c
HD
4588 /*
4589 * When called from shmem_replace_page(), in some cases the
4590 * oldpage has already been charged, and in some cases not.
4591 */
4592 if (!memcg)
4593 return;
ab936cbc
KH
4594 /*
4595 * Even if newpage->mapping was NULL before starting replacement,
4596 * the newpage may be on LRU(or pagevec for LRU) already. We lock
4597 * LRU while we overwrite pc->mem_cgroup.
4598 */
ce587e65 4599 __mem_cgroup_commit_charge(memcg, newpage, 1, type, true);
ab936cbc
KH
4600}
4601
f212ad7c
DN
4602#ifdef CONFIG_DEBUG_VM
4603static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
4604{
4605 struct page_cgroup *pc;
4606
4607 pc = lookup_page_cgroup(page);
cfa44946
JW
4608 /*
4609 * Can be NULL while feeding pages into the page allocator for
4610 * the first time, i.e. during boot or memory hotplug;
4611 * or when mem_cgroup_disabled().
4612 */
f212ad7c
DN
4613 if (likely(pc) && PageCgroupUsed(pc))
4614 return pc;
4615 return NULL;
4616}
4617
4618bool mem_cgroup_bad_page_check(struct page *page)
4619{
4620 if (mem_cgroup_disabled())
4621 return false;
4622
4623 return lookup_page_cgroup_used(page) != NULL;
4624}
4625
4626void mem_cgroup_print_bad_page(struct page *page)
4627{
4628 struct page_cgroup *pc;
4629
4630 pc = lookup_page_cgroup_used(page);
4631 if (pc) {
d045197f
AM
4632 pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n",
4633 pc, pc->flags, pc->mem_cgroup);
f212ad7c
DN
4634 }
4635}
4636#endif
4637
d38d2a75 4638static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
8c7c6e34 4639 unsigned long long val)
628f4235 4640{
81d39c20 4641 int retry_count;
3c11ecf4 4642 u64 memswlimit, memlimit;
628f4235 4643 int ret = 0;
81d39c20
KH
4644 int children = mem_cgroup_count_children(memcg);
4645 u64 curusage, oldusage;
3c11ecf4 4646 int enlarge;
81d39c20
KH
4647
4648 /*
4649 * For keeping hierarchical_reclaim simple, how long we should retry
4650 * depends on the caller. We set our retry-count to be a function
4651 * of the number of children which we should visit in this loop.
4652 */
4653 retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;
4654
4655 oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);
628f4235 4656
3c11ecf4 4657 enlarge = 0;
8c7c6e34 4658 while (retry_count) {
628f4235
KH
4659 if (signal_pending(current)) {
4660 ret = -EINTR;
4661 break;
4662 }
8c7c6e34
KH
4663 /*
4664 * Rather than hiding all of this in some function, do it in an
4665 * open-coded manner so you can see what it really does.
aaad153e 4666 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
8c7c6e34
KH
4667 */
4668 mutex_lock(&set_limit_mutex);
4669 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4670 if (memswlimit < val) {
4671 ret = -EINVAL;
4672 mutex_unlock(&set_limit_mutex);
628f4235
KH
4673 break;
4674 }
3c11ecf4
KH
4675
4676 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4677 if (memlimit < val)
4678 enlarge = 1;
4679
8c7c6e34 4680 ret = res_counter_set_limit(&memcg->res, val);
22a668d7
KH
4681 if (!ret) {
4682 if (memswlimit == val)
4683 memcg->memsw_is_minimum = true;
4684 else
4685 memcg->memsw_is_minimum = false;
4686 }
8c7c6e34
KH
4687 mutex_unlock(&set_limit_mutex);
4688
4689 if (!ret)
4690 break;
4691
5660048c
JW
4692 mem_cgroup_reclaim(memcg, GFP_KERNEL,
4693 MEM_CGROUP_RECLAIM_SHRINK);
81d39c20
KH
4694 curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
4695 /* Usage is reduced ? */
4696 if (curusage >= oldusage)
4697 retry_count--;
4698 else
4699 oldusage = curusage;
8c7c6e34 4700 }
3c11ecf4
KH
4701 if (!ret && enlarge)
4702 memcg_oom_recover(memcg);
14797e23 4703
8c7c6e34
KH
4704 return ret;
4705}
4706
338c8431
LZ
4707static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
4708 unsigned long long val)
8c7c6e34 4709{
81d39c20 4710 int retry_count;
3c11ecf4 4711 u64 memlimit, memswlimit, oldusage, curusage;
81d39c20
KH
4712 int children = mem_cgroup_count_children(memcg);
4713 int ret = -EBUSY;
3c11ecf4 4714 int enlarge = 0;
8c7c6e34 4715
81d39c20
KH
4716 /* see mem_cgroup_resize_res_limit */
4717 retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
4718 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
8c7c6e34
KH
4719 while (retry_count) {
4720 if (signal_pending(current)) {
4721 ret = -EINTR;
4722 break;
4723 }
4724 /*
4725 * Rather than hiding all of this in some function, do it in an
4726 * open-coded manner so you can see what it really does.
aaad153e 4727 * We have to guarantee memcg->res.limit <= memcg->memsw.limit.
8c7c6e34
KH
4728 */
4729 mutex_lock(&set_limit_mutex);
4730 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
4731 if (memlimit > val) {
4732 ret = -EINVAL;
4733 mutex_unlock(&set_limit_mutex);
4734 break;
4735 }
3c11ecf4
KH
4736 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
4737 if (memswlimit < val)
4738 enlarge = 1;
8c7c6e34 4739 ret = res_counter_set_limit(&memcg->memsw, val);
22a668d7
KH
4740 if (!ret) {
4741 if (memlimit == val)
4742 memcg->memsw_is_minimum = true;
4743 else
4744 memcg->memsw_is_minimum = false;
4745 }
8c7c6e34
KH
4746 mutex_unlock(&set_limit_mutex);
4747
4748 if (!ret)
4749 break;
4750
5660048c
JW
4751 mem_cgroup_reclaim(memcg, GFP_KERNEL,
4752 MEM_CGROUP_RECLAIM_NOSWAP |
4753 MEM_CGROUP_RECLAIM_SHRINK);
8c7c6e34 4754 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
81d39c20 4755 /* Usage is reduced ? */
8c7c6e34 4756 if (curusage >= oldusage)
628f4235 4757 retry_count--;
81d39c20
KH
4758 else
4759 oldusage = curusage;
628f4235 4760 }
3c11ecf4
KH
4761 if (!ret && enlarge)
4762 memcg_oom_recover(memcg);
628f4235
KH
4763 return ret;
4764}
4765
4e416953 4766unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
0ae5e89c
YH
4767 gfp_t gfp_mask,
4768 unsigned long *total_scanned)
4e416953
BS
4769{
4770 unsigned long nr_reclaimed = 0;
4771 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
4772 unsigned long reclaimed;
4773 int loop = 0;
4774 struct mem_cgroup_tree_per_zone *mctz;
ef8745c1 4775 unsigned long long excess;
0ae5e89c 4776 unsigned long nr_scanned;
4e416953
BS
4777
4778 if (order > 0)
4779 return 0;
4780
00918b6a 4781 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
4e416953
BS
4782 /*
4783 * This loop can run for a while, especially if mem_cgroups continuously
4784 * keep exceeding their soft limit and putting the system under
4785 * pressure.
4786 */
4787 do {
4788 if (next_mz)
4789 mz = next_mz;
4790 else
4791 mz = mem_cgroup_largest_soft_limit_node(mctz);
4792 if (!mz)
4793 break;
4794
0ae5e89c 4795 nr_scanned = 0;
d79154bb 4796 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
5660048c 4797 gfp_mask, &nr_scanned);
4e416953 4798 nr_reclaimed += reclaimed;
0ae5e89c 4799 *total_scanned += nr_scanned;
4e416953
BS
4800 spin_lock(&mctz->lock);
4801
4802 /*
4803 * If we failed to reclaim anything from this memory cgroup
4804 * it is time to move on to the next cgroup
4805 */
4806 next_mz = NULL;
4807 if (!reclaimed) {
4808 do {
4809 /*
4810 * Loop until we find yet another one.
4811 *
4812 * By the time we get the soft_limit lock
4813 * again, someone might have added the
4814 * group back on the RB tree. Iterate to
4815 * make sure we get a different mem.
4816 * mem_cgroup_largest_soft_limit_node returns
4817 * NULL if no other cgroup is present on
4818 * the tree
4819 */
4820 next_mz =
4821 __mem_cgroup_largest_soft_limit_node(mctz);
39cc98f1 4822 if (next_mz == mz)
d79154bb 4823 css_put(&next_mz->memcg->css);
39cc98f1 4824 else /* next_mz == NULL or other memcg */
4e416953
BS
4825 break;
4826 } while (1);
4827 }
d79154bb
HD
4828 __mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
4829 excess = res_counter_soft_limit_excess(&mz->memcg->res);
4e416953
BS
4830 /*
4831 * One school of thought says that we should not add
4832 * back the node to the tree if reclaim returns 0.
4833 * But our reclaim could return 0, simply because due
4834 * to priority we are exposing a smaller subset of
4835 * memory to reclaim from. Consider this as a longer
4836 * term TODO.
4837 */
ef8745c1 4838 /* If excess == 0, no tree ops */
d79154bb 4839 __mem_cgroup_insert_exceeded(mz->memcg, mz, mctz, excess);
4e416953 4840 spin_unlock(&mctz->lock);
d79154bb 4841 css_put(&mz->memcg->css);
4e416953
BS
4842 loop++;
4843 /*
4844 * Could not reclaim anything and there are no more
4845 * mem cgroups to try or we seem to be looping without
4846 * reclaiming anything.
4847 */
4848 if (!nr_reclaimed &&
4849 (next_mz == NULL ||
4850 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
4851 break;
4852 } while (!nr_reclaimed);
4853 if (next_mz)
d79154bb 4854 css_put(&next_mz->memcg->css);
4e416953
BS
4855 return nr_reclaimed;
4856}
4857
2ef37d3f
MH
4858/**
4859 * mem_cgroup_force_empty_list - clears LRU of a group
4860 * @memcg: group to clear
4861 * @node: NUMA node
4862 * @zid: zone id
4863 * @lru: lru to clear
4864 *
3c935d18 4865 * Traverse a specified page_cgroup list and try to drop them all. This doesn't
2ef37d3f
MH
4866 * reclaim the pages themselves - pages are moved to the parent (or root)
4867 * group.
cc847582 4868 */
2ef37d3f 4869static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
08e552c6 4870 int node, int zid, enum lru_list lru)
cc847582 4871{
bea8c150 4872 struct lruvec *lruvec;
2ef37d3f 4873 unsigned long flags;
072c56c1 4874 struct list_head *list;
925b7673
JW
4875 struct page *busy;
4876 struct zone *zone;
072c56c1 4877
08e552c6 4878 zone = &NODE_DATA(node)->node_zones[zid];
bea8c150
HD
4879 lruvec = mem_cgroup_zone_lruvec(zone, memcg);
4880 list = &lruvec->lists[lru];
cc847582 4881
f817ed48 4882 busy = NULL;
2ef37d3f 4883 do {
925b7673 4884 struct page_cgroup *pc;
5564e88b
JW
4885 struct page *page;
4886
08e552c6 4887 spin_lock_irqsave(&zone->lru_lock, flags);
f817ed48 4888 if (list_empty(list)) {
08e552c6 4889 spin_unlock_irqrestore(&zone->lru_lock, flags);
52d4b9ac 4890 break;
f817ed48 4891 }
925b7673
JW
4892 page = list_entry(list->prev, struct page, lru);
4893 if (busy == page) {
4894 list_move(&page->lru, list);
648bcc77 4895 busy = NULL;
08e552c6 4896 spin_unlock_irqrestore(&zone->lru_lock, flags);
f817ed48
KH
4897 continue;
4898 }
08e552c6 4899 spin_unlock_irqrestore(&zone->lru_lock, flags);
f817ed48 4900
925b7673 4901 pc = lookup_page_cgroup(page);
5564e88b 4902
3c935d18 4903 if (mem_cgroup_move_parent(page, pc, memcg)) {
f817ed48 4904 /* found lock contention or "pc" is obsolete. */
925b7673 4905 busy = page;
f817ed48
KH
4906 cond_resched();
4907 } else
4908 busy = NULL;
2ef37d3f 4909 } while (!list_empty(list));
cc847582
KH
4910}
4911
4912/*
c26251f9
MH
4913 * Make mem_cgroup's charge be 0 if there is no task, by moving
4914 * all the charges and pages to the parent.
cc847582 4915 * This enables deleting this mem_cgroup.
c26251f9
MH
4916 *
4917 * Caller is responsible for holding css reference on the memcg.
cc847582 4918 */
ab5196c2 4919static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
cc847582 4920{
c26251f9 4921 int node, zid;
bea207c8 4922 u64 usage;
f817ed48 4923
fce66477 4924 do {
52d4b9ac
KH
4925 /* This is for making all *used* pages to be on LRU. */
4926 lru_add_drain_all();
c0ff4b85 4927 drain_all_stock_sync(memcg);
c0ff4b85 4928 mem_cgroup_start_move(memcg);
31aaea4a 4929 for_each_node_state(node, N_MEMORY) {
2ef37d3f 4930 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
f156ab93
HD
4931 enum lru_list lru;
4932 for_each_lru(lru) {
2ef37d3f 4933 mem_cgroup_force_empty_list(memcg,
f156ab93 4934 node, zid, lru);
f817ed48 4935 }
1ecaab2b 4936 }
f817ed48 4937 }
c0ff4b85
R
4938 mem_cgroup_end_move(memcg);
4939 memcg_oom_recover(memcg);
52d4b9ac 4940 cond_resched();
f817ed48 4941
2ef37d3f 4942 /*
bea207c8
GC
4943 * Kernel memory may not necessarily be trackable to a specific
4944 * process, so such charges are not migrated, and therefore we can't
4945 * expect their value to drop to 0 here.
4946 * Having res filled up with kmem only is enough.
4947 *
2ef37d3f
MH
4948 * This is a safety check because mem_cgroup_force_empty_list
4949 * could have raced with mem_cgroup_replace_page_cache callers
4950 * so the lru seemed empty but the page could have been added
4951 * right after the check. RES_USAGE should be safe as we always
4952 * charge before adding to the LRU.
4953 */
bea207c8
GC
4954 usage = res_counter_read_u64(&memcg->res, RES_USAGE) -
4955 res_counter_read_u64(&memcg->kmem, RES_USAGE);
4956 } while (usage > 0);
c26251f9
MH
4957}
4958
b5f99b53
GC
4959/*
4960 * This mainly exists for tests during the setting of use_hierarchy.
4961 * Since this is the very setting we are changing, the current hierarchy value
4962 * is meaningless.
4963 */
4964static inline bool __memcg_has_children(struct mem_cgroup *memcg)
4965{
4966 struct cgroup *pos;
4967
4968 /* bounce at first found */
4969 cgroup_for_each_child(pos, memcg->css.cgroup)
4970 return true;
4971 return false;
4972}
4973
4974/*
0999821b
GC
4975 * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
4976 * to be already dead (as in mem_cgroup_force_empty, for instance). This is
b5f99b53
GC
4977 * different from mem_cgroup_count_children(), in the sense that we don't really care how
4978 * many children we have; we only need to know if we have any. It also counts
4979 * any memcg without hierarchy as infertile.
4980 */
4981static inline bool memcg_has_children(struct mem_cgroup *memcg)
4982{
4983 return memcg->use_hierarchy && __memcg_has_children(memcg);
4984}
4985
c26251f9
MH
4986/*
4987 * Reclaims as many pages from the given memcg as possible and moves
4988 * the rest to the parent.
4989 *
4990 * Caller is responsible for holding css reference for memcg.
4991 */
4992static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
4993{
4994 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
4995 struct cgroup *cgrp = memcg->css.cgroup;
f817ed48 4996
c1e862c1 4997 /* returns EBUSY if there is a task or if we come here twice. */
c26251f9
MH
4998 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
4999 return -EBUSY;
5000
c1e862c1
KH
5001 /* we call try-to-free pages to make this cgroup empty */
5002 lru_add_drain_all();
f817ed48 5003 /* try to free all pages in this cgroup */
569530fb 5004 while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
f817ed48 5005 int progress;
c1e862c1 5006
c26251f9
MH
5007 if (signal_pending(current))
5008 return -EINTR;
5009
c0ff4b85 5010 progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL,
185efc0f 5011 false);
c1e862c1 5012 if (!progress) {
f817ed48 5013 nr_retries--;
c1e862c1 5014 /* maybe some writeback is necessary */
8aa7e847 5015 congestion_wait(BLK_RW_ASYNC, HZ/10);
c1e862c1 5016 }
f817ed48
KH
5017
5018 }
08e552c6 5019 lru_add_drain();
ab5196c2
MH
5020 mem_cgroup_reparent_charges(memcg);
5021
5022 return 0;
cc847582
KH
5023}
5024
6bbda35c 5025static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
c1e862c1 5026{
c26251f9
MH
5027 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5028 int ret;
5029
d8423011
MH
5030 if (mem_cgroup_is_root(memcg))
5031 return -EINVAL;
c26251f9
MH
5032 css_get(&memcg->css);
5033 ret = mem_cgroup_force_empty(memcg);
5034 css_put(&memcg->css);
5035
5036 return ret;
c1e862c1
KH
5037}
5038
5039
18f59ea7
BS
5040static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
5041{
5042 return mem_cgroup_from_cont(cont)->use_hierarchy;
5043}
5044
5045static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
5046 u64 val)
5047{
5048 int retval = 0;
c0ff4b85 5049 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
18f59ea7 5050 struct cgroup *parent = cont->parent;
c0ff4b85 5051 struct mem_cgroup *parent_memcg = NULL;
18f59ea7
BS
5052
5053 if (parent)
c0ff4b85 5054 parent_memcg = mem_cgroup_from_cont(parent);
18f59ea7 5055
0999821b 5056 mutex_lock(&memcg_create_mutex);
567fb435
GC
5057
5058 if (memcg->use_hierarchy == val)
5059 goto out;
5060
18f59ea7 5061 /*
af901ca1 5062 * If parent's use_hierarchy is set, we can't make any modifications
18f59ea7
BS
5063 * in the child subtrees. If it is unset, then the change can
5064 * occur, provided the current cgroup has no children.
5065 *
5066 * For the root cgroup, parent_memcg is NULL; we allow the value to be
5067 * set if there are no children.
5068 */
c0ff4b85 5069 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
18f59ea7 5070 (val == 1 || val == 0)) {
b5f99b53 5071 if (!__memcg_has_children(memcg))
c0ff4b85 5072 memcg->use_hierarchy = val;
18f59ea7
BS
5073 else
5074 retval = -EBUSY;
5075 } else
5076 retval = -EINVAL;
567fb435
GC
5077
5078out:
0999821b 5079 mutex_unlock(&memcg_create_mutex);
18f59ea7
BS
5080
5081 return retval;
5082}
5083
0c3e73e8 5084
c0ff4b85 5085static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg,
7a159cc9 5086 enum mem_cgroup_stat_index idx)
0c3e73e8 5087{
7d74b06f 5088 struct mem_cgroup *iter;
7a159cc9 5089 long val = 0;
0c3e73e8 5090
7a159cc9 5091 /* Per-cpu values can be negative, use a signed accumulator */
c0ff4b85 5092 for_each_mem_cgroup_tree(iter, memcg)
7d74b06f
KH
5093 val += mem_cgroup_read_stat(iter, idx);
5094
5095 if (val < 0) /* race ? */
5096 val = 0;
5097 return val;
0c3e73e8
BS
5098}
5099
c0ff4b85 5100static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
104f3928 5101{
7d74b06f 5102 u64 val;
104f3928 5103
c0ff4b85 5104 if (!mem_cgroup_is_root(memcg)) {
104f3928 5105 if (!swap)
65c64ce8 5106 return res_counter_read_u64(&memcg->res, RES_USAGE);
104f3928 5107 else
65c64ce8 5108 return res_counter_read_u64(&memcg->memsw, RES_USAGE);
104f3928
KS
5109 }
5110
b070e65c
DR
5111 /*
5112 * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS
5113 * as well as in MEM_CGROUP_STAT_RSS_HUGE.
5114 */
c0ff4b85
R
5115 val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE);
5116 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS);
104f3928 5117
7d74b06f 5118 if (swap)
bff6bb83 5119 val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP);
104f3928
KS
5120
5121 return val << PAGE_SHIFT;
5122}
5123
af36f906
TH
5124static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
5125 struct file *file, char __user *buf,
5126 size_t nbytes, loff_t *ppos)
8cdea7c0 5127{
c0ff4b85 5128 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
af36f906 5129 char str[64];
104f3928 5130 u64 val;
86ae53e1
GC
5131 int name, len;
5132 enum res_type type;
8c7c6e34
KH
5133
5134 type = MEMFILE_TYPE(cft->private);
5135 name = MEMFILE_ATTR(cft->private);
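	/*
	 * Editor's note (illustrative, not part of the original source):
	 * cft->private packs both values, so e.g. the memsw.limit_in_bytes
	 * entry below, declared with MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
	 * arrives here with type == _MEMSWAP and name == RES_LIMIT.
	 */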
af36f906 5136
8c7c6e34
KH
5137 switch (type) {
5138 case _MEM:
104f3928 5139 if (name == RES_USAGE)
c0ff4b85 5140 val = mem_cgroup_usage(memcg, false);
104f3928 5141 else
c0ff4b85 5142 val = res_counter_read_u64(&memcg->res, name);
8c7c6e34
KH
5143 break;
5144 case _MEMSWAP:
104f3928 5145 if (name == RES_USAGE)
c0ff4b85 5146 val = mem_cgroup_usage(memcg, true);
104f3928 5147 else
c0ff4b85 5148 val = res_counter_read_u64(&memcg->memsw, name);
8c7c6e34 5149 break;
510fc4e1
GC
5150 case _KMEM:
5151 val = res_counter_read_u64(&memcg->kmem, name);
5152 break;
8c7c6e34
KH
5153 default:
5154 BUG();
8c7c6e34 5155 }
af36f906
TH
5156
5157 len = scnprintf(str, sizeof(str), "%llu\n", (unsigned long long)val);
5158 return simple_read_from_buffer(buf, nbytes, ppos, str, len);
8cdea7c0 5159}
510fc4e1
GC
5160
5161static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
5162{
5163 int ret = -EINVAL;
5164#ifdef CONFIG_MEMCG_KMEM
5165 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
5166 /*
5167 * For simplicity, we won't allow this to be disabled. It also can't
5168 * be changed if the cgroup has children already, or if tasks had
5169 * already joined.
5170 *
5171 * If tasks join before we set the limit, a person looking at
5172 * kmem.usage_in_bytes will have no way to determine when it took
5173 * place, which makes the value quite meaningless.
5174 *
5175 * After it first became limited, changes in the value of the limit are
5176 * of course permitted.
510fc4e1 5177 */
0999821b 5178 mutex_lock(&memcg_create_mutex);
510fc4e1
GC
5179 mutex_lock(&set_limit_mutex);
5180 if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
b5f99b53 5181 if (cgroup_task_count(cont) || memcg_has_children(memcg)) {
510fc4e1
GC
5182 ret = -EBUSY;
5183 goto out;
5184 }
5185 ret = res_counter_set_limit(&memcg->kmem, val);
5186 VM_BUG_ON(ret);
5187
55007d84
GC
5188 ret = memcg_update_cache_sizes(memcg);
5189 if (ret) {
5190 res_counter_set_limit(&memcg->kmem, RESOURCE_MAX);
5191 goto out;
5192 }
692e89ab
GC
5193 static_key_slow_inc(&memcg_kmem_enabled_key);
5194 /*
5195 * setting the active bit after the inc will guarantee no one
5196 * starts accounting before all call sites are patched
5197 */
5198 memcg_kmem_set_active(memcg);
510fc4e1
GC
5199 } else
5200 ret = res_counter_set_limit(&memcg->kmem, val);
5201out:
5202 mutex_unlock(&set_limit_mutex);
0999821b 5203 mutex_unlock(&memcg_create_mutex);
510fc4e1
GC
5204#endif
5205 return ret;
5206}
5207
6d043990 5208#ifdef CONFIG_MEMCG_KMEM
55007d84 5209static int memcg_propagate_kmem(struct mem_cgroup *memcg)
510fc4e1 5210{
55007d84 5211 int ret = 0;
510fc4e1
GC
5212 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5213 if (!parent)
55007d84
GC
5214 goto out;
5215
510fc4e1 5216 memcg->kmem_account_flags = parent->kmem_account_flags;
a8964b9b
GC
5217 /*
5218 * When that happens, we need to disable the static branch only on those
5219 * memcgs that enabled it. To achieve this, we would be forced to
5220 * complicate the code by keeping track of which memcgs were the ones
5221 * that actually enabled limits, and which ones got it from their
5222 * parents.
5223 *
5224 * It is a lot simpler just to do static_key_slow_inc() on every child
5225 * that is accounted.
5226 */
55007d84
GC
5227 if (!memcg_kmem_is_active(memcg))
5228 goto out;
5229
5230 /*
10d5ebf4
LZ
5231 * __mem_cgroup_free() will issue static_key_slow_dec() because this
5232 * memcg is active already. If the later initialization fails then the
5233 * cgroup core triggers the cleanup so we do not have to do it here.
55007d84 5234 */
55007d84
GC
5235 static_key_slow_inc(&memcg_kmem_enabled_key);
5236
5237 mutex_lock(&set_limit_mutex);
425c598d 5238 memcg_stop_kmem_account();
55007d84 5239 ret = memcg_update_cache_sizes(memcg);
425c598d 5240 memcg_resume_kmem_account();
55007d84 5241 mutex_unlock(&set_limit_mutex);
55007d84
GC
5242out:
5243 return ret;
510fc4e1 5244}
6d043990 5245#endif /* CONFIG_MEMCG_KMEM */
510fc4e1 5246
628f4235
KH
5247/*
5248 * The user of this function is...
5249 * RES_LIMIT.
5250 */
856c13aa
PM
5251static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
5252 const char *buffer)
8cdea7c0 5253{
628f4235 5254 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
86ae53e1
GC
5255 enum res_type type;
5256 int name;
628f4235
KH
5257 unsigned long long val;
5258 int ret;
5259
8c7c6e34
KH
5260 type = MEMFILE_TYPE(cft->private);
5261 name = MEMFILE_ATTR(cft->private);
af36f906 5262
8c7c6e34 5263 switch (name) {
628f4235 5264 case RES_LIMIT:
4b3bde4c
BS
5265 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
5266 ret = -EINVAL;
5267 break;
5268 }
628f4235
KH
5269 /* This function does all the necessary parsing; reuse it */
5270 ret = res_counter_memparse_write_strategy(buffer, &val);
8c7c6e34
KH
5271 if (ret)
5272 break;
5273 if (type == _MEM)
628f4235 5274 ret = mem_cgroup_resize_limit(memcg, val);
510fc4e1 5275 else if (type == _MEMSWAP)
8c7c6e34 5276 ret = mem_cgroup_resize_memsw_limit(memcg, val);
510fc4e1
GC
5277 else if (type == _KMEM)
5278 ret = memcg_update_kmem_limit(cont, val);
5279 else
5280 return -EINVAL;
628f4235 5281 break;
296c81d8
BS
5282 case RES_SOFT_LIMIT:
5283 ret = res_counter_memparse_write_strategy(buffer, &val);
5284 if (ret)
5285 break;
5286 /*
5287 * For memsw, soft limits are hard to implement in terms
5288 * of semantics; for now, we support soft limits for
5289 * control without swap.
5290 */
5291 if (type == _MEM)
5292 ret = res_counter_set_soft_limit(&memcg->res, val);
5293 else
5294 ret = -EINVAL;
5295 break;
628f4235
KH
5296 default:
5297 ret = -EINVAL; /* should be BUG() ? */
5298 break;
5299 }
5300 return ret;
8cdea7c0
BS
5301}
5302
fee7b548
KH
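/*
 * Editor's illustration (not part of the original source): this walks up the
 * hierarchy and keeps the smallest limit seen, so with use_hierarchy enabled,
 * a child limited to 1G under a parent limited to 512M reports
 * hierarchical_memory_limit == 512M in memory.stat.
 */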
5303static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
5304 unsigned long long *mem_limit, unsigned long long *memsw_limit)
5305{
5306 struct cgroup *cgroup;
5307 unsigned long long min_limit, min_memsw_limit, tmp;
5308
5309 min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
5310 min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5311 cgroup = memcg->css.cgroup;
5312 if (!memcg->use_hierarchy)
5313 goto out;
5314
5315 while (cgroup->parent) {
5316 cgroup = cgroup->parent;
5317 memcg = mem_cgroup_from_cont(cgroup);
5318 if (!memcg->use_hierarchy)
5319 break;
5320 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
5321 min_limit = min(min_limit, tmp);
5322 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
5323 min_memsw_limit = min(min_memsw_limit, tmp);
5324 }
5325out:
5326 *mem_limit = min_limit;
5327 *memsw_limit = min_memsw_limit;
fee7b548
KH
5328}
5329
29f2a4da 5330static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
c84872e1 5331{
af36f906 5332 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
86ae53e1
GC
5333 int name;
5334 enum res_type type;
c84872e1 5335
8c7c6e34
KH
5336 type = MEMFILE_TYPE(event);
5337 name = MEMFILE_ATTR(event);
af36f906 5338
8c7c6e34 5339 switch (name) {
29f2a4da 5340 case RES_MAX_USAGE:
8c7c6e34 5341 if (type == _MEM)
c0ff4b85 5342 res_counter_reset_max(&memcg->res);
510fc4e1 5343 else if (type == _MEMSWAP)
c0ff4b85 5344 res_counter_reset_max(&memcg->memsw);
510fc4e1
GC
5345 else if (type == _KMEM)
5346 res_counter_reset_max(&memcg->kmem);
5347 else
5348 return -EINVAL;
29f2a4da
PE
5349 break;
5350 case RES_FAILCNT:
8c7c6e34 5351 if (type == _MEM)
c0ff4b85 5352 res_counter_reset_failcnt(&memcg->res);
510fc4e1 5353 else if (type == _MEMSWAP)
c0ff4b85 5354 res_counter_reset_failcnt(&memcg->memsw);
510fc4e1
GC
5355 else if (type == _KMEM)
5356 res_counter_reset_failcnt(&memcg->kmem);
5357 else
5358 return -EINVAL;
29f2a4da
PE
5359 break;
5360 }
f64c3f54 5361
85cc59db 5362 return 0;
c84872e1
PE
5363}
5364
7dc74be0
DN
5365static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
5366 struct cftype *cft)
5367{
5368 return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
5369}
5370
02491447 5371#ifdef CONFIG_MMU
7dc74be0
DN
5372static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
5373 struct cftype *cft, u64 val)
5374{
c0ff4b85 5375 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
7dc74be0
DN
5376
5377 if (val >= (1 << NR_MOVE_TYPE))
5378 return -EINVAL;
ee5e8472 5379
7dc74be0 5380 /*
ee5e8472
GC
5381 * No kind of locking is needed in here, because ->can_attach() will
5382 * check this value once in the beginning of the process, and then carry
5383 * on with stale data. This means that changes to this value will only
5384 * affect task migrations starting after the change.
7dc74be0 5385 */
c0ff4b85 5386 memcg->move_charge_at_immigrate = val;
7dc74be0
DN
5387 return 0;
5388}
02491447
DN
5389#else
5390static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
5391 struct cftype *cft, u64 val)
5392{
5393 return -ENOSYS;
5394}
5395#endif
7dc74be0 5396
406eb0c9 5397#ifdef CONFIG_NUMA
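/*
 * memory.numa_stat layout sketch (editor's illustration with made-up counts,
 * not part of the original source): one line per LRU class, one column per
 * node, e.g.
 *
 *	total=1048576 N0=524288 N1=524288
 *	file=786432 N0=393216 N1=393216
 *	anon=262144 N0=131072 N1=131072
 *	unevictable=0 N0=0 N1=0
 */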
ab215884 5398static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
fada52ca 5399 struct seq_file *m)
406eb0c9
YH
5400{
5401 int nid;
5402 unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
5403 unsigned long node_nr;
d79154bb 5404 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
406eb0c9 5405
d79154bb 5406 total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
406eb0c9 5407 seq_printf(m, "total=%lu", total_nr);
31aaea4a 5408 for_each_node_state(nid, N_MEMORY) {
d79154bb 5409 node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL);
406eb0c9
YH
5410 seq_printf(m, " N%d=%lu", nid, node_nr);
5411 }
5412 seq_putc(m, '\n');
5413
d79154bb 5414 file_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE);
406eb0c9 5415 seq_printf(m, "file=%lu", file_nr);
31aaea4a 5416 for_each_node_state(nid, N_MEMORY) {
d79154bb 5417 node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
bb2a0de9 5418 LRU_ALL_FILE);
406eb0c9
YH
5419 seq_printf(m, " N%d=%lu", nid, node_nr);
5420 }
5421 seq_putc(m, '\n');
5422
d79154bb 5423 anon_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL_ANON);
406eb0c9 5424 seq_printf(m, "anon=%lu", anon_nr);
31aaea4a 5425 for_each_node_state(nid, N_MEMORY) {
d79154bb 5426 node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
bb2a0de9 5427 LRU_ALL_ANON);
406eb0c9
YH
5428 seq_printf(m, " N%d=%lu", nid, node_nr);
5429 }
5430 seq_putc(m, '\n');
5431
d79154bb 5432 unevictable_nr = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
406eb0c9 5433 seq_printf(m, "unevictable=%lu", unevictable_nr);
31aaea4a 5434 for_each_node_state(nid, N_MEMORY) {
d79154bb 5435 node_nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
bb2a0de9 5436 BIT(LRU_UNEVICTABLE));
406eb0c9
YH
5437 seq_printf(m, " N%d=%lu", nid, node_nr);
5438 }
5439 seq_putc(m, '\n');
5440 return 0;
5441}
5442#endif /* CONFIG_NUMA */
5443
af7c4b0e
JW
5444static inline void mem_cgroup_lru_names_not_uptodate(void)
5445{
5446 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
5447}
5448
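/*
 * memory.stat layout sketch (editor's illustration with made-up values, not
 * part of the original source): local counters first, then events and LRU
 * sizes, then the hierarchical limits and total_* aggregates, e.g.
 *
 *	cache 20480000
 *	rss 1228800
 *	pgpgin 51324
 *	pgpgout 46015
 *	inactive_anon 0
 *	active_anon 1228800
 *	hierarchical_memory_limit 536870912
 *	total_cache 20480000
 *	total_rss 1228800
 */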
ab215884 5449static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
78ccf5b5 5450 struct seq_file *m)
d2ceb9b7 5451{
d79154bb 5452 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
af7c4b0e
JW
5453 struct mem_cgroup *mi;
5454 unsigned int i;
406eb0c9 5455
af7c4b0e 5456 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
bff6bb83 5457 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1dd3a273 5458 continue;
af7c4b0e
JW
5459 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
5460 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
1dd3a273 5461 }
7b854121 5462
af7c4b0e
JW
5463 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
5464 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
5465 mem_cgroup_read_events(memcg, i));
5466
5467 for (i = 0; i < NR_LRU_LISTS; i++)
5468 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
5469 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
5470
14067bb3 5471 /* Hierarchical information */
fee7b548
KH
5472 {
5473 unsigned long long limit, memsw_limit;
d79154bb 5474 memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
78ccf5b5 5475 seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
fee7b548 5476 if (do_swap_account)
78ccf5b5
JW
5477 seq_printf(m, "hierarchical_memsw_limit %llu\n",
5478 memsw_limit);
fee7b548 5479 }
7f016ee8 5480
af7c4b0e
JW
5481 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
5482 long long val = 0;
5483
bff6bb83 5484 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1dd3a273 5485 continue;
af7c4b0e
JW
5486 for_each_mem_cgroup_tree(mi, memcg)
5487 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
5488 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
5489 }
5490
5491 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
5492 unsigned long long val = 0;
5493
5494 for_each_mem_cgroup_tree(mi, memcg)
5495 val += mem_cgroup_read_events(mi, i);
5496 seq_printf(m, "total_%s %llu\n",
5497 mem_cgroup_events_names[i], val);
5498 }
5499
5500 for (i = 0; i < NR_LRU_LISTS; i++) {
5501 unsigned long long val = 0;
5502
5503 for_each_mem_cgroup_tree(mi, memcg)
5504 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
5505 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
1dd3a273 5506 }
14067bb3 5507
7f016ee8 5508#ifdef CONFIG_DEBUG_VM
7f016ee8
KM
5509 {
5510 int nid, zid;
5511 struct mem_cgroup_per_zone *mz;
89abfab1 5512 struct zone_reclaim_stat *rstat;
7f016ee8
KM
5513 unsigned long recent_rotated[2] = {0, 0};
5514 unsigned long recent_scanned[2] = {0, 0};
5515
5516 for_each_online_node(nid)
5517 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
d79154bb 5518 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
89abfab1 5519 rstat = &mz->lruvec.reclaim_stat;
7f016ee8 5520
89abfab1
HD
5521 recent_rotated[0] += rstat->recent_rotated[0];
5522 recent_rotated[1] += rstat->recent_rotated[1];
5523 recent_scanned[0] += rstat->recent_scanned[0];
5524 recent_scanned[1] += rstat->recent_scanned[1];
7f016ee8 5525 }
78ccf5b5
JW
5526 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
5527 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
5528 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
5529 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
7f016ee8
KM
5530 }
5531#endif
5532
d2ceb9b7
KH
5533 return 0;
5534}
5535
a7885eb8
KM
5536static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
5537{
5538 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
5539
1f4c025b 5540 return mem_cgroup_swappiness(memcg);
a7885eb8
KM
5541}
5542
5543static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
5544 u64 val)
5545{
5546 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
5547 struct mem_cgroup *parent;
068b38c1 5548
a7885eb8
KM
5549 if (val > 100)
5550 return -EINVAL;
5551
5552 if (cgrp->parent == NULL)
5553 return -EINVAL;
5554
5555 parent = mem_cgroup_from_cont(cgrp->parent);
068b38c1 5556
0999821b 5557 mutex_lock(&memcg_create_mutex);
068b38c1 5558
a7885eb8 5559 /* If under hierarchy, only empty-root can set this value */
b5f99b53 5560 if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
0999821b 5561 mutex_unlock(&memcg_create_mutex);
a7885eb8 5562 return -EINVAL;
068b38c1 5563 }
a7885eb8 5564
a7885eb8 5565 memcg->swappiness = val;
a7885eb8 5566
0999821b 5567 mutex_unlock(&memcg_create_mutex);
068b38c1 5568
a7885eb8
KM
5569 return 0;
5570}
5571
2e72b634
KS
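/*
 * Editor's illustration (not part of the original source): with registered
 * thresholds {4M, 8M, 16M} and current_threshold at the 4M entry, a usage
 * reading of 9M makes the forward walk below signal the 8M eventfd and move
 * current_threshold to 8M; if usage then falls to 3M, the backward walk
 * signals both the 8M and 4M eventfds and current_threshold drops below the
 * first entry.
 */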
5572static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
5573{
5574 struct mem_cgroup_threshold_ary *t;
5575 u64 usage;
5576 int i;
5577
5578 rcu_read_lock();
5579 if (!swap)
2c488db2 5580 t = rcu_dereference(memcg->thresholds.primary);
2e72b634 5581 else
2c488db2 5582 t = rcu_dereference(memcg->memsw_thresholds.primary);
2e72b634
KS
5583
5584 if (!t)
5585 goto unlock;
5586
5587 usage = mem_cgroup_usage(memcg, swap);
5588
5589 /*
748dad36 5590 * current_threshold points to threshold just below or equal to usage.
2e72b634
KS
5591 * If it's not true, a threshold was crossed after last
5592 * call of __mem_cgroup_threshold().
5593 */
5407a562 5594 i = t->current_threshold;
2e72b634
KS
5595
5596 /*
5597 * Iterate backward over array of thresholds starting from
5598 * current_threshold and check if a threshold is crossed.
5599 * If none of thresholds below usage is crossed, we read
5600 * only one element of the array here.
5601 */
5602 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
5603 eventfd_signal(t->entries[i].eventfd, 1);
5604
5605 /* i = current_threshold + 1 */
5606 i++;
5607
5608 /*
5609 * Iterate forward over array of thresholds starting from
5610 * current_threshold+1 and check if a threshold is crossed.
5611 * If none of thresholds above usage is crossed, we read
5612 * only one element of the array here.
5613 */
5614 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
5615 eventfd_signal(t->entries[i].eventfd, 1);
5616
5617 /* Update current_threshold */
5407a562 5618 t->current_threshold = i - 1;
2e72b634
KS
5619unlock:
5620 rcu_read_unlock();
5621}
5622
5623static void mem_cgroup_threshold(struct mem_cgroup *memcg)
5624{
ad4ca5f4
KS
5625 while (memcg) {
5626 __mem_cgroup_threshold(memcg, false);
5627 if (do_swap_account)
5628 __mem_cgroup_threshold(memcg, true);
5629
5630 memcg = parent_mem_cgroup(memcg);
5631 }
2e72b634
KS
5632}
5633
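/*
 * Ascending comparator for sort() over the thresholds array. Editor's note
 * (not part of the original source): since ->threshold is a u64, truncating
 * the difference to int can report the wrong order for thresholds that are
 * very far apart; an explicit three-way comparison would avoid that.
 */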
5634static int compare_thresholds(const void *a, const void *b)
5635{
5636 const struct mem_cgroup_threshold *_a = a;
5637 const struct mem_cgroup_threshold *_b = b;
5638
5639 return _a->threshold - _b->threshold;
5640}
5641
c0ff4b85 5642static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
9490ff27
KH
5643{
5644 struct mem_cgroup_eventfd_list *ev;
5645
c0ff4b85 5646 list_for_each_entry(ev, &memcg->oom_notify, list)
9490ff27
KH
5647 eventfd_signal(ev->eventfd, 1);
5648 return 0;
5649}
5650
c0ff4b85 5651static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
9490ff27 5652{
7d74b06f
KH
5653 struct mem_cgroup *iter;
5654
c0ff4b85 5655 for_each_mem_cgroup_tree(iter, memcg)
7d74b06f 5656 mem_cgroup_oom_notify_cb(iter);
9490ff27
KH
5657}
5658
5659static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
5660 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
2e72b634
KS
5661{
5662 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2c488db2
KS
5663 struct mem_cgroup_thresholds *thresholds;
5664 struct mem_cgroup_threshold_ary *new;
86ae53e1 5665 enum res_type type = MEMFILE_TYPE(cft->private);
2e72b634 5666 u64 threshold, usage;
2c488db2 5667 int i, size, ret;
2e72b634
KS
5668
5669 ret = res_counter_memparse_write_strategy(args, &threshold);
5670 if (ret)
5671 return ret;
5672
5673 mutex_lock(&memcg->thresholds_lock);
2c488db2 5674
2e72b634 5675 if (type == _MEM)
2c488db2 5676 thresholds = &memcg->thresholds;
2e72b634 5677 else if (type == _MEMSWAP)
2c488db2 5678 thresholds = &memcg->memsw_thresholds;
2e72b634
KS
5679 else
5680 BUG();
5681
5682 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5683
5684 /* Check if a threshold crossed before adding a new one */
2c488db2 5685 if (thresholds->primary)
2e72b634
KS
5686 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
5687
2c488db2 5688 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
2e72b634
KS
5689
5690 /* Allocate memory for new array of thresholds */
2c488db2 5691 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
2e72b634 5692 GFP_KERNEL);
2c488db2 5693 if (!new) {
2e72b634
KS
5694 ret = -ENOMEM;
5695 goto unlock;
5696 }
2c488db2 5697 new->size = size;
2e72b634
KS
5698
5699 /* Copy thresholds (if any) to new array */
2c488db2
KS
5700 if (thresholds->primary) {
5701 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
2e72b634 5702 sizeof(struct mem_cgroup_threshold));
2c488db2
KS
5703 }
5704
2e72b634 5705 /* Add new threshold */
2c488db2
KS
5706 new->entries[size - 1].eventfd = eventfd;
5707 new->entries[size - 1].threshold = threshold;
2e72b634
KS
5708
5709 /* Sort thresholds. Registering of new threshold isn't time-critical */
2c488db2 5710 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
2e72b634
KS
5711 compare_thresholds, NULL);
5712
5713 /* Find current threshold */
2c488db2 5714 new->current_threshold = -1;
2e72b634 5715 for (i = 0; i < size; i++) {
748dad36 5716 if (new->entries[i].threshold <= usage) {
2e72b634 5717 /*
2c488db2
KS
5718 * new->current_threshold will not be used until
5719 * rcu_assign_pointer(), so it's safe to increment
2e72b634
KS
5720 * it here.
5721 */
2c488db2 5722 ++new->current_threshold;
748dad36
SZ
5723 } else
5724 break;
2e72b634
KS
5725 }
5726
2c488db2
KS
5727 /* Free old spare buffer and save old primary buffer as spare */
5728 kfree(thresholds->spare);
5729 thresholds->spare = thresholds->primary;
5730
5731 rcu_assign_pointer(thresholds->primary, new);
2e72b634 5732
907860ed 5733 /* To be sure that nobody uses thresholds */
2e72b634
KS
5734 synchronize_rcu();
5735
2e72b634
KS
5736unlock:
5737 mutex_unlock(&memcg->thresholds_lock);
5738
5739 return ret;
5740}
5741
907860ed 5742static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
9490ff27 5743 struct cftype *cft, struct eventfd_ctx *eventfd)
2e72b634
KS
5744{
5745 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
2c488db2
KS
5746 struct mem_cgroup_thresholds *thresholds;
5747 struct mem_cgroup_threshold_ary *new;
86ae53e1 5748 enum res_type type = MEMFILE_TYPE(cft->private);
2e72b634 5749 u64 usage;
2c488db2 5750 int i, j, size;
2e72b634
KS
5751
5752 mutex_lock(&memcg->thresholds_lock);
5753 if (type == _MEM)
2c488db2 5754 thresholds = &memcg->thresholds;
2e72b634 5755 else if (type == _MEMSWAP)
2c488db2 5756 thresholds = &memcg->memsw_thresholds;
2e72b634
KS
5757 else
5758 BUG();
5759
371528ca
AV
5760 if (!thresholds->primary)
5761 goto unlock;
5762
2e72b634
KS
5763 usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
5764
5765 /* Check if a threshold crossed before removing */
5766 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
5767
5768 /* Calculate new number of threshold */
2c488db2
KS
5769 size = 0;
5770 for (i = 0; i < thresholds->primary->size; i++) {
5771 if (thresholds->primary->entries[i].eventfd != eventfd)
2e72b634
KS
5772 size++;
5773 }
5774
2c488db2 5775 new = thresholds->spare;
907860ed 5776
2e72b634
KS
5777 /* Set thresholds array to NULL if we don't have thresholds */
5778 if (!size) {
2c488db2
KS
5779 kfree(new);
5780 new = NULL;
907860ed 5781 goto swap_buffers;
2e72b634
KS
5782 }
5783
2c488db2 5784 new->size = size;
2e72b634
KS
5785
5786 /* Copy thresholds and find current threshold */
2c488db2
KS
5787 new->current_threshold = -1;
5788 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
5789 if (thresholds->primary->entries[i].eventfd == eventfd)
2e72b634
KS
5790 continue;
5791
2c488db2 5792 new->entries[j] = thresholds->primary->entries[i];
748dad36 5793 if (new->entries[j].threshold <= usage) {
2e72b634 5794 /*
2c488db2 5795 * new->current_threshold will not be used
2e72b634
KS
5796 * until rcu_assign_pointer(), so it's safe to increment
5797 * it here.
5798 */
2c488db2 5799 ++new->current_threshold;
2e72b634
KS
5800 }
5801 j++;
5802 }
5803
907860ed 5804swap_buffers:
2c488db2
KS
5805 /* Swap primary and spare array */
5806 thresholds->spare = thresholds->primary;
8c757763
SZ
5807 /* If all events are unregistered, free the spare array */
5808 if (!new) {
5809 kfree(thresholds->spare);
5810 thresholds->spare = NULL;
5811 }
5812
2c488db2 5813 rcu_assign_pointer(thresholds->primary, new);
2e72b634 5814
907860ed 5815 /* To be sure that nobody uses thresholds */
2e72b634 5816 synchronize_rcu();
371528ca 5817unlock:
2e72b634 5818 mutex_unlock(&memcg->thresholds_lock);
2e72b634 5819}
c1e862c1 5820
9490ff27
KH
5821static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
5822 struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
5823{
5824 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
5825 struct mem_cgroup_eventfd_list *event;
86ae53e1 5826 enum res_type type = MEMFILE_TYPE(cft->private);
9490ff27
KH
5827
5828 BUG_ON(type != _OOM_TYPE);
5829 event = kmalloc(sizeof(*event), GFP_KERNEL);
5830 if (!event)
5831 return -ENOMEM;
5832
1af8efe9 5833 spin_lock(&memcg_oom_lock);
9490ff27
KH
5834
5835 event->eventfd = eventfd;
5836 list_add(&event->list, &memcg->oom_notify);
5837
5838 /* already in OOM ? */
79dfdacc 5839 if (atomic_read(&memcg->under_oom))
9490ff27 5840 eventfd_signal(eventfd, 1);
1af8efe9 5841 spin_unlock(&memcg_oom_lock);
9490ff27
KH
5842
5843 return 0;
5844}
5845
907860ed 5846static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
9490ff27
KH
5847 struct cftype *cft, struct eventfd_ctx *eventfd)
5848{
c0ff4b85 5849 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
9490ff27 5850 struct mem_cgroup_eventfd_list *ev, *tmp;
86ae53e1 5851 enum res_type type = MEMFILE_TYPE(cft->private);
9490ff27
KH
5852
5853 BUG_ON(type != _OOM_TYPE);
5854
1af8efe9 5855 spin_lock(&memcg_oom_lock);
9490ff27 5856
c0ff4b85 5857 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
9490ff27
KH
5858 if (ev->eventfd == eventfd) {
5859 list_del(&ev->list);
5860 kfree(ev);
5861 }
5862 }
5863
1af8efe9 5864 spin_unlock(&memcg_oom_lock);
9490ff27
KH
5865}
5866
3c11ecf4
KH
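/*
 * memory.oom_control read output sketch (editor's illustration, not part of
 * the original source):
 *
 *	oom_kill_disable 0
 *	under_oom 0
 */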
5867static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
5868 struct cftype *cft, struct cgroup_map_cb *cb)
5869{
c0ff4b85 5870 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3c11ecf4 5871
c0ff4b85 5872 cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
3c11ecf4 5873
c0ff4b85 5874 if (atomic_read(&memcg->under_oom))
3c11ecf4
KH
5875 cb->fill(cb, "under_oom", 1);
5876 else
5877 cb->fill(cb, "under_oom", 0);
5878 return 0;
5879}
5880
3c11ecf4
KH
5881static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
5882 struct cftype *cft, u64 val)
5883{
c0ff4b85 5884 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
3c11ecf4
KH
5885 struct mem_cgroup *parent;
5886
5887 /* cannot set to root cgroup and only 0 and 1 are allowed */
5888 if (!cgrp->parent || !((val == 0) || (val == 1)))
5889 return -EINVAL;
5890
5891 parent = mem_cgroup_from_cont(cgrp->parent);
5892
0999821b 5893 mutex_lock(&memcg_create_mutex);
3c11ecf4 5894 /* oom-kill-disable is a flag for subhierarchy. */
b5f99b53 5895 if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
0999821b 5896 mutex_unlock(&memcg_create_mutex);
3c11ecf4
KH
5897 return -EINVAL;
5898 }
c0ff4b85 5899 memcg->oom_kill_disable = val;
4d845ebf 5900 if (!val)
c0ff4b85 5901 memcg_oom_recover(memcg);
0999821b 5902 mutex_unlock(&memcg_create_mutex);
3c11ecf4
KH
5903 return 0;
5904}
5905
c255a458 5906#ifdef CONFIG_MEMCG_KMEM
cbe128e3 5907static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
e5671dfa 5908{
55007d84
GC
5909 int ret;
5910
2633d7a0 5911 memcg->kmemcg_id = -1;
55007d84
GC
5912 ret = memcg_propagate_kmem(memcg);
5913 if (ret)
5914 return ret;
2633d7a0 5915
1d62e436 5916 return mem_cgroup_sockets_init(memcg, ss);
573b400d 5917}
e5671dfa 5918
10d5ebf4 5919static void memcg_destroy_kmem(struct mem_cgroup *memcg)
d1a4c0b3 5920{
1d62e436 5921 mem_cgroup_sockets_destroy(memcg);
10d5ebf4
LZ
5922}
5923
5924static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
5925{
5926 if (!memcg_kmem_is_active(memcg))
5927 return;
5928
5929 /*
5930 * kmem charges can outlive the cgroup. In the case of slab
5931 * pages, for instance, a page may contain objects from various
5932 * processes. As we refrain from taking a reference for every
5933 * such allocation we have to be careful when doing uncharge
5934 * (see memcg_uncharge_kmem) and here during offlining.
5935 *
5936 * The idea is that only the _last_ uncharge which sees
5937 * the dead memcg will drop the last reference. An additional
5938 * reference is taken here before the group is marked dead
5939 * which is then paired with css_put during uncharge resp. here.
5940 *
5941 * Although this might sound strange as this path is called from
5942 * css_offline() when the reference might have dropped down to 0
5943 * and shouldn't be incremented anymore (css_tryget would fail)
5944 * we do not have other options because of the kmem allocations
5945 * lifetime.
5946 */
5947 css_get(&memcg->css);
7de37682
GC
5948
5949 memcg_kmem_mark_dead(memcg);
5950
5951 if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0)
5952 return;
5953
7de37682 5954 if (memcg_kmem_test_and_clear_dead(memcg))
10d5ebf4 5955 css_put(&memcg->css);
d1a4c0b3 5956}
e5671dfa 5957#else
cbe128e3 5958static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
e5671dfa
GC
5959{
5960 return 0;
5961}
d1a4c0b3 5962
10d5ebf4
LZ
5963static void memcg_destroy_kmem(struct mem_cgroup *memcg)
5964{
5965}
5966
5967static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
d1a4c0b3
GC
5968{
5969}
e5671dfa
GC
5970#endif
5971
8cdea7c0
BS
5972static struct cftype mem_cgroup_files[] = {
5973 {
0eea1030 5974 .name = "usage_in_bytes",
8c7c6e34 5975 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
af36f906 5976 .read = mem_cgroup_read,
9490ff27
KH
5977 .register_event = mem_cgroup_usage_register_event,
5978 .unregister_event = mem_cgroup_usage_unregister_event,
8cdea7c0 5979 },
c84872e1
PE
5980 {
5981 .name = "max_usage_in_bytes",
8c7c6e34 5982 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
29f2a4da 5983 .trigger = mem_cgroup_reset,
af36f906 5984 .read = mem_cgroup_read,
c84872e1 5985 },
8cdea7c0 5986 {
0eea1030 5987 .name = "limit_in_bytes",
8c7c6e34 5988 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
856c13aa 5989 .write_string = mem_cgroup_write,
af36f906 5990 .read = mem_cgroup_read,
8cdea7c0 5991 },
296c81d8
BS
5992 {
5993 .name = "soft_limit_in_bytes",
5994 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5995 .write_string = mem_cgroup_write,
af36f906 5996 .read = mem_cgroup_read,
296c81d8 5997 },
8cdea7c0
BS
5998 {
5999 .name = "failcnt",
8c7c6e34 6000 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
29f2a4da 6001 .trigger = mem_cgroup_reset,
af36f906 6002 .read = mem_cgroup_read,
8cdea7c0 6003 },
d2ceb9b7
KH
6004 {
6005 .name = "stat",
ab215884 6006 .read_seq_string = memcg_stat_show,
d2ceb9b7 6007 },
c1e862c1
KH
6008 {
6009 .name = "force_empty",
6010 .trigger = mem_cgroup_force_empty_write,
6011 },
18f59ea7
BS
6012 {
6013 .name = "use_hierarchy",
f00baae7 6014 .flags = CFTYPE_INSANE,
18f59ea7
BS
6015 .write_u64 = mem_cgroup_hierarchy_write,
6016 .read_u64 = mem_cgroup_hierarchy_read,
6017 },
a7885eb8
KM
6018 {
6019 .name = "swappiness",
6020 .read_u64 = mem_cgroup_swappiness_read,
6021 .write_u64 = mem_cgroup_swappiness_write,
6022 },
7dc74be0
DN
6023 {
6024 .name = "move_charge_at_immigrate",
6025 .read_u64 = mem_cgroup_move_charge_read,
6026 .write_u64 = mem_cgroup_move_charge_write,
6027 },
9490ff27
KH
6028 {
6029 .name = "oom_control",
3c11ecf4
KH
6030 .read_map = mem_cgroup_oom_control_read,
6031 .write_u64 = mem_cgroup_oom_control_write,
9490ff27
KH
6032 .register_event = mem_cgroup_oom_register_event,
6033 .unregister_event = mem_cgroup_oom_unregister_event,
6034 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
6035 },
70ddf637
AV
6036 {
6037 .name = "pressure_level",
6038 .register_event = vmpressure_register_event,
6039 .unregister_event = vmpressure_unregister_event,
6040 },
406eb0c9
YH
6041#ifdef CONFIG_NUMA
6042 {
6043 .name = "numa_stat",
ab215884 6044 .read_seq_string = memcg_numa_stat_show,
406eb0c9
YH
6045 },
6046#endif
510fc4e1
GC
6047#ifdef CONFIG_MEMCG_KMEM
6048 {
6049 .name = "kmem.limit_in_bytes",
6050 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
6051 .write_string = mem_cgroup_write,
6052 .read = mem_cgroup_read,
6053 },
6054 {
6055 .name = "kmem.usage_in_bytes",
6056 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
6057 .read = mem_cgroup_read,
6058 },
6059 {
6060 .name = "kmem.failcnt",
6061 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
6062 .trigger = mem_cgroup_reset,
6063 .read = mem_cgroup_read,
6064 },
6065 {
6066 .name = "kmem.max_usage_in_bytes",
6067 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
6068 .trigger = mem_cgroup_reset,
6069 .read = mem_cgroup_read,
6070 },
749c5415
GC
6071#ifdef CONFIG_SLABINFO
6072 {
6073 .name = "kmem.slabinfo",
6074 .read_seq_string = mem_cgroup_slabinfo_read,
6075 },
6076#endif
8c7c6e34 6077#endif
6bc10349 6078 { }, /* terminate */
af36f906 6079};
8c7c6e34 6080
2d11085e
MH
6081#ifdef CONFIG_MEMCG_SWAP
6082static struct cftype memsw_cgroup_files[] = {
6083 {
6084 .name = "memsw.usage_in_bytes",
6085 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
6086 .read = mem_cgroup_read,
6087 .register_event = mem_cgroup_usage_register_event,
6088 .unregister_event = mem_cgroup_usage_unregister_event,
6089 },
6090 {
6091 .name = "memsw.max_usage_in_bytes",
6092 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
6093 .trigger = mem_cgroup_reset,
6094 .read = mem_cgroup_read,
6095 },
6096 {
6097 .name = "memsw.limit_in_bytes",
6098 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
6099 .write_string = mem_cgroup_write,
6100 .read = mem_cgroup_read,
6101 },
6102 {
6103 .name = "memsw.failcnt",
6104 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
6105 .trigger = mem_cgroup_reset,
6106 .read = mem_cgroup_read,
6107 },
6108 { }, /* terminate */
6109};
6110#endif
c0ff4b85 6111static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
6d12e2d8
KH
6112{
6113 struct mem_cgroup_per_node *pn;
1ecaab2b 6114 struct mem_cgroup_per_zone *mz;
41e3355d 6115 int zone, tmp = node;
1ecaab2b
KH
6116 /*
6117 * This routine is called against possible nodes.
6118 * But it's BUG to call kmalloc() against offline node.
6119 *
6120 * TODO: this routine can waste much memory for nodes which will
6121 * never be onlined. It's better to use memory hotplug callback
6122 * function.
6123 */
41e3355d
KH
6124 if (!node_state(node, N_NORMAL_MEMORY))
6125 tmp = -1;
17295c88 6126 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
6d12e2d8
KH
6127 if (!pn)
6128 return 1;
1ecaab2b 6129
1ecaab2b
KH
6130 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6131 mz = &pn->zoneinfo[zone];
bea8c150 6132 lruvec_init(&mz->lruvec);
f64c3f54 6133 mz->usage_in_excess = 0;
4e416953 6134 mz->on_tree = false;
d79154bb 6135 mz->memcg = memcg;
1ecaab2b 6136 }
54f72fe0 6137 memcg->nodeinfo[node] = pn;
6d12e2d8
KH
6138 return 0;
6139}
6140
c0ff4b85 6141static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
1ecaab2b 6142{
54f72fe0 6143 kfree(memcg->nodeinfo[node]);
1ecaab2b
KH
6144}
6145
33327948
KH
6146static struct mem_cgroup *mem_cgroup_alloc(void)
6147{
d79154bb 6148 struct mem_cgroup *memcg;
45cf7ebd 6149 size_t size = memcg_size();
33327948 6150
45cf7ebd 6151 /* Can be very big if nr_node_ids is very big */
c8dad2bb 6152 if (size < PAGE_SIZE)
d79154bb 6153 memcg = kzalloc(size, GFP_KERNEL);
33327948 6154 else
d79154bb 6155 memcg = vzalloc(size);
33327948 6156
d79154bb 6157 if (!memcg)
e7bbcdf3
DC
6158 return NULL;
6159
d79154bb
HD
6160 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
6161 if (!memcg->stat)
d2e61b8d 6162 goto out_free;
d79154bb
HD
6163 spin_lock_init(&memcg->pcp_counter_lock);
6164 return memcg;
d2e61b8d
DC
6165
6166out_free:
6167 if (size < PAGE_SIZE)
d79154bb 6168 kfree(memcg);
d2e61b8d 6169 else
d79154bb 6170 vfree(memcg);
d2e61b8d 6171 return NULL;
33327948
KH
6172}
6173
59927fb9 6174/*
c8b2a36f
GC
6175 * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
6176 * (Scanning everything at force_empty is too costly...)
6177 *
6178 * Instead of clearing all references at force_empty, we remember
6179 * the number of references from swap_cgroup and free mem_cgroup when
6180 * it goes down to 0.
6181 *
6182 * Removal of cgroup itself succeeds regardless of refs from swap.
59927fb9 6183 */
c8b2a36f
GC
6184
6185static void __mem_cgroup_free(struct mem_cgroup *memcg)
59927fb9 6186{
c8b2a36f 6187 int node;
45cf7ebd 6188 size_t size = memcg_size();
59927fb9 6189
c8b2a36f
GC
6190 mem_cgroup_remove_from_trees(memcg);
6191 free_css_id(&mem_cgroup_subsys, &memcg->css);
6192
6193 for_each_node(node)
6194 free_mem_cgroup_per_zone_info(memcg, node);
6195
6196 free_percpu(memcg->stat);
6197
3f134619
GC
6198 /*
6199 * We need to make sure that (at least for now), the jump label
6200 * destruction code runs outside of the cgroup lock. This is because
6201 * get_online_cpus(), which is called from the static_branch update,
6202 * can't be called inside the cgroup_lock. cpusets are the ones
6203 * enforcing this dependency, so if they ever change, we might as well.
6204 *
6205 * schedule_work() will guarantee this happens. Be careful if you need
6206 * to move this code around, and make sure it is outside
6207 * the cgroup_lock.
6208 */
a8964b9b 6209 disarm_static_keys(memcg);
3afe36b1
GC
6210 if (size < PAGE_SIZE)
6211 kfree(memcg);
6212 else
6213 vfree(memcg);
59927fb9 6214}
3afe36b1 6215
59927fb9 6216
8c7c6e34 6217/*
c8b2a36f
GC
6218 * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
6219 * but in process context. The work_freeing structure is overlaid
6220 * on the rcu_freeing structure, which itself is overlaid on memsw.
8c7c6e34 6221 */
c8b2a36f 6222static void free_work(struct work_struct *work)
33327948 6223{
c8b2a36f 6224 struct mem_cgroup *memcg;
08e552c6 6225
c8b2a36f
GC
6226 memcg = container_of(work, struct mem_cgroup, work_freeing);
6227 __mem_cgroup_free(memcg);
6228}
04046e1a 6229
c8b2a36f
GC
6230static void free_rcu(struct rcu_head *rcu_head)
6231{
6232 struct mem_cgroup *memcg;
08e552c6 6233
c8b2a36f
GC
6234 memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
6235 INIT_WORK(&memcg->work_freeing, free_work);
6236 schedule_work(&memcg->work_freeing);
33327948
KH
6237}
6238
c0ff4b85 6239static void mem_cgroup_get(struct mem_cgroup *memcg)
8c7c6e34 6240{
c0ff4b85 6241 atomic_inc(&memcg->refcnt);
8c7c6e34
KH
6242}
6243
c0ff4b85 6244static void __mem_cgroup_put(struct mem_cgroup *memcg, int count)
8c7c6e34 6245{
c0ff4b85
R
6246 if (atomic_sub_and_test(count, &memcg->refcnt)) {
6247 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
c8b2a36f 6248 call_rcu(&memcg->rcu_freeing, free_rcu);
7bcc1bb1
DN
6249 if (parent)
6250 mem_cgroup_put(parent);
6251 }
8c7c6e34
KH
6252}
6253
c0ff4b85 6254static void mem_cgroup_put(struct mem_cgroup *memcg)
483c30b5 6255{
c0ff4b85 6256 __mem_cgroup_put(memcg, 1);
483c30b5
DN
6257}
6258
7bcc1bb1
DN
6259/*
6260 * Returns the parent mem_cgroup in memcgroup hierarchy with hierarchy enabled.
6261 */
e1aab161 6262struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
7bcc1bb1 6263{
c0ff4b85 6264 if (!memcg->res.parent)
7bcc1bb1 6265 return NULL;
c0ff4b85 6266 return mem_cgroup_from_res_counter(memcg->res.parent, res);
7bcc1bb1 6267}
e1aab161 6268EXPORT_SYMBOL(parent_mem_cgroup);
33327948 6269
8787a1df 6270static void __init mem_cgroup_soft_limit_tree_init(void)
f64c3f54
BS
6271{
6272 struct mem_cgroup_tree_per_node *rtpn;
6273 struct mem_cgroup_tree_per_zone *rtpz;
6274 int tmp, node, zone;
6275
3ed28fa1 6276 for_each_node(node) {
f64c3f54
BS
6277 tmp = node;
6278 if (!node_state(node, N_NORMAL_MEMORY))
6279 tmp = -1;
6280 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
8787a1df 6281 BUG_ON(!rtpn);
f64c3f54
BS
6282
6283 soft_limit_tree.rb_tree_per_node[node] = rtpn;
6284
6285 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
6286 rtpz = &rtpn->rb_tree_per_zone[zone];
6287 rtpz->rb_root = RB_ROOT;
6288 spin_lock_init(&rtpz->lock);
6289 }
6290 }
f64c3f54
BS
6291}
6292
0eb253e2 6293static struct cgroup_subsys_state * __ref
92fb9748 6294mem_cgroup_css_alloc(struct cgroup *cont)
8cdea7c0 6295{
d142e3e6 6296 struct mem_cgroup *memcg;
04046e1a 6297 long error = -ENOMEM;
6d12e2d8 6298 int node;
8cdea7c0 6299
c0ff4b85
R
6300 memcg = mem_cgroup_alloc();
6301 if (!memcg)
04046e1a 6302 return ERR_PTR(error);
78fb7466 6303
3ed28fa1 6304 for_each_node(node)
c0ff4b85 6305 if (alloc_mem_cgroup_per_zone_info(memcg, node))
6d12e2d8 6306 goto free_out;
f64c3f54 6307
c077719b 6308 /* root ? */
28dbc4b6 6309 if (cont->parent == NULL) {
a41c58a6 6310 root_mem_cgroup = memcg;
d142e3e6
GC
6311 res_counter_init(&memcg->res, NULL);
6312 res_counter_init(&memcg->memsw, NULL);
6313 res_counter_init(&memcg->kmem, NULL);
18f59ea7 6314 }
28dbc4b6 6315
d142e3e6
GC
6316 memcg->last_scanned_node = MAX_NUMNODES;
6317 INIT_LIST_HEAD(&memcg->oom_notify);
6318 atomic_set(&memcg->refcnt, 1);
6319 memcg->move_charge_at_immigrate = 0;
6320 mutex_init(&memcg->thresholds_lock);
6321 spin_lock_init(&memcg->move_lock);
70ddf637 6322 vmpressure_init(&memcg->vmpressure);
d142e3e6
GC
6323
6324 return &memcg->css;
6325
6326free_out:
6327 __mem_cgroup_free(memcg);
6328 return ERR_PTR(error);
6329}
6330
6331static int
6332mem_cgroup_css_online(struct cgroup *cont)
6333{
6334 struct mem_cgroup *memcg, *parent;
6335 int error = 0;
6336
6337 if (!cont->parent)
6338 return 0;
6339
0999821b 6340 mutex_lock(&memcg_create_mutex);
d142e3e6
GC
6341 memcg = mem_cgroup_from_cont(cont);
6342 parent = mem_cgroup_from_cont(cont->parent);
6343
6344 memcg->use_hierarchy = parent->use_hierarchy;
6345 memcg->oom_kill_disable = parent->oom_kill_disable;
6346 memcg->swappiness = mem_cgroup_swappiness(parent);
6347
6348 if (parent->use_hierarchy) {
c0ff4b85
R
6349 res_counter_init(&memcg->res, &parent->res);
6350 res_counter_init(&memcg->memsw, &parent->memsw);
510fc4e1 6351 res_counter_init(&memcg->kmem, &parent->kmem);
55007d84 6352
7bcc1bb1
DN
6353 /*
6354 * We increment refcnt of the parent to ensure that we can
6355 * safely access it on res_counter_charge/uncharge.
6356 * This refcnt will be decremented when freeing this
6357 * mem_cgroup(see mem_cgroup_put).
6358 */
6359 mem_cgroup_get(parent);
18f59ea7 6360 } else {
c0ff4b85
R
6361 res_counter_init(&memcg->res, NULL);
6362 res_counter_init(&memcg->memsw, NULL);
510fc4e1 6363 res_counter_init(&memcg->kmem, NULL);
8c7f6edb
TH
6364 /*
6365 * Deeper hierarchy with use_hierarchy == false doesn't make
6366 * much sense so let cgroup subsystem know about this
6367 * unfortunate state in our controller.
6368 */
d142e3e6 6369 if (parent != root_mem_cgroup)
8c7f6edb 6370 mem_cgroup_subsys.broken_hierarchy = true;
18f59ea7 6371 }
cbe128e3
GC
6372
6373 error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
0999821b 6374 mutex_unlock(&memcg_create_mutex);
d142e3e6 6375 return error;
8cdea7c0
BS
6376}
6377
5f578161
MH
6378/*
6379 * Announce all parents that a group from their hierarchy is gone.
6380 */
6381static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
6382{
6383 struct mem_cgroup *parent = memcg;
6384
6385 while ((parent = parent_mem_cgroup(parent)))
519ebea3 6386 mem_cgroup_iter_invalidate(parent);
5f578161
MH
6387
6388 /*
6389 * if the root memcg is not hierarchical we have to check it
6390 * explicitly.
6391 */
6392 if (!root_mem_cgroup->use_hierarchy)
519ebea3 6393 mem_cgroup_iter_invalidate(root_mem_cgroup);
5f578161
MH
6394}
6395
92fb9748 6396static void mem_cgroup_css_offline(struct cgroup *cont)
df878fb0 6397{
c0ff4b85 6398 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
ec64f515 6399
10d5ebf4
LZ
6400 kmem_cgroup_css_offline(memcg);
6401
5f578161 6402 mem_cgroup_invalidate_reclaim_iterators(memcg);
ab5196c2 6403 mem_cgroup_reparent_charges(memcg);
1f458cbf 6404 mem_cgroup_destroy_all_caches(memcg);
df878fb0
KH
6405}
6406
92fb9748 6407static void mem_cgroup_css_free(struct cgroup *cont)
8cdea7c0 6408{
c0ff4b85 6409 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
c268e994 6410
10d5ebf4
LZ
6411 memcg_destroy_kmem(memcg);
6412 __mem_cgroup_free(memcg);
8cdea7c0
BS
6413}
6414
02491447 6415#ifdef CONFIG_MMU
7dc74be0 6416/* Handlers for move charge at task migration. */
854ffa8d
DN
6417#define PRECHARGE_COUNT_AT_ONCE 256
6418static int mem_cgroup_do_precharge(unsigned long count)
7dc74be0 6419{
854ffa8d
DN
6420 int ret = 0;
6421 int batch_count = PRECHARGE_COUNT_AT_ONCE;
c0ff4b85 6422 struct mem_cgroup *memcg = mc.to;
4ffef5fe 6423
c0ff4b85 6424 if (mem_cgroup_is_root(memcg)) {
854ffa8d
DN
6425 mc.precharge += count;
6426 /* we don't need css_get for root */
6427 return ret;
6428 }
6429 /* try to charge at once */
6430 if (count > 1) {
6431 struct res_counter *dummy;
6432 /*
c0ff4b85 6433 * "memcg" cannot be under rmdir() because we've already checked
854ffa8d
DN
6434 * by cgroup_lock_live_cgroup() that it is not removed and we
6435 * are still under the same cgroup_mutex. So we can postpone
6436 * css_get().
6437 */
c0ff4b85 6438 if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
854ffa8d 6439 goto one_by_one;
c0ff4b85 6440 if (do_swap_account && res_counter_charge(&memcg->memsw,
854ffa8d 6441 PAGE_SIZE * count, &dummy)) {
c0ff4b85 6442 res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
854ffa8d
DN
6443 goto one_by_one;
6444 }
6445 mc.precharge += count;
854ffa8d
DN
6446 return ret;
6447 }
6448one_by_one:
6449 /* fall back to one by one charge */
6450 while (count--) {
6451 if (signal_pending(current)) {
6452 ret = -EINTR;
6453 break;
6454 }
6455 if (!batch_count--) {
6456 batch_count = PRECHARGE_COUNT_AT_ONCE;
6457 cond_resched();
6458 }
c0ff4b85
R
6459 ret = __mem_cgroup_try_charge(NULL,
6460 GFP_KERNEL, 1, &memcg, false);
38c5d72f 6461 if (ret)
854ffa8d 6462 /* mem_cgroup_clear_mc() will do uncharge later */
38c5d72f 6463 return ret;
854ffa8d
DN
6464 mc.precharge++;
6465 }
4ffef5fe
DN
6466 return ret;
6467}
6468
6469/**
8d32ff84 6470 * get_mctgt_type - get target type of moving charge
4ffef5fe
DN
6471 * @vma: the vma to which the pte to be checked belongs
6472 * @addr: the address corresponding to the pte to be checked
6473 * @ptent: the pte to be checked
02491447 6474 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4ffef5fe
DN
6475 *
6476 * Returns
6477 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
6478 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
6479 * move charge. if @target is not NULL, the page is stored in target->page
6480 * with an extra refcount taken (callers should handle it).
02491447
DN
6481 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
6482 * target for charge migration. if @target is not NULL, the entry is stored
6483 * in target->ent.
4ffef5fe
DN
6484 *
6485 * Called with pte lock held.
6486 */
4ffef5fe
DN
6487union mc_target {
6488 struct page *page;
02491447 6489 swp_entry_t ent;
4ffef5fe
DN
6490};
6491
4ffef5fe 6492enum mc_target_type {
8d32ff84 6493 MC_TARGET_NONE = 0,
4ffef5fe 6494 MC_TARGET_PAGE,
02491447 6495 MC_TARGET_SWAP,
4ffef5fe
DN
6496};
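/*
 * Editor's illustration (not part of the original source): callers walk the
 * page tables and dispatch on the returned type, roughly
 *
 *	union mc_target target;
 *
 *	switch (get_mctgt_type(vma, addr, ptent, &target)) {
 *	case MC_TARGET_PAGE:
 *		... move or count the page, then put_page(target.page) ...
 *		break;
 *	case MC_TARGET_SWAP:
 *		... account the swap entry in target.ent ...
 *		break;
 *	default:
 *		break;
 *	}
 *
 * See mem_cgroup_count_precharge_pte_range() below, which passes a NULL
 * target and only counts.
 */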
6497
90254a65
DN
6498static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
6499 unsigned long addr, pte_t ptent)
4ffef5fe 6500{
90254a65 6501 struct page *page = vm_normal_page(vma, addr, ptent);
4ffef5fe 6502
90254a65
DN
6503 if (!page || !page_mapped(page))
6504 return NULL;
6505 if (PageAnon(page)) {
6506 /* we don't move shared anon */
4b91355e 6507 if (!move_anon())
90254a65 6508 return NULL;
87946a72
DN
6509 } else if (!move_file())
6510 /* we ignore mapcount for file pages */
90254a65
DN
6511 return NULL;
6512 if (!get_page_unless_zero(page))
6513 return NULL;
6514
6515 return page;
6516}
6517
4b91355e 6518#ifdef CONFIG_SWAP
90254a65
DN
6519static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6520 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6521{
90254a65
DN
6522 struct page *page = NULL;
6523 swp_entry_t ent = pte_to_swp_entry(ptent);
6524
6525 if (!move_anon() || non_swap_entry(ent))
6526 return NULL;
4b91355e
KH
6527 /*
6528 * Because lookup_swap_cache() updates some statistics counter,
6529 * we call find_get_page() with swapper_space directly.
6530 */
33806f06 6531 page = find_get_page(swap_address_space(ent), ent.val);
90254a65
DN
6532 if (do_swap_account)
6533 entry->val = ent.val;
6534
6535 return page;
6536}
4b91355e
KH
6537#else
6538static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
6539 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6540{
6541 return NULL;
6542}
6543#endif
90254a65 6544
87946a72
DN
6545static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
6546 unsigned long addr, pte_t ptent, swp_entry_t *entry)
6547{
6548 struct page *page = NULL;
87946a72
DN
6549 struct address_space *mapping;
6550 pgoff_t pgoff;
6551
6552 if (!vma->vm_file) /* anonymous vma */
6553 return NULL;
6554 if (!move_file())
6555 return NULL;
6556
87946a72
DN
6557 mapping = vma->vm_file->f_mapping;
6558 if (pte_none(ptent))
6559 pgoff = linear_page_index(vma, addr);
6560 else /* pte_file(ptent) is true */
6561 pgoff = pte_to_pgoff(ptent);
6562
6563 /* page is moved even if it's not RSS of this task(page-faulted). */
aa3b1895
HD
6564 page = find_get_page(mapping, pgoff);
6565
6566#ifdef CONFIG_SWAP
6567 /* shmem/tmpfs may report page out on swap: account for that too. */
6568 if (radix_tree_exceptional_entry(page)) {
6569 swp_entry_t swap = radix_to_swp_entry(page);
87946a72 6570 if (do_swap_account)
aa3b1895 6571 *entry = swap;
33806f06 6572 page = find_get_page(swap_address_space(swap), swap.val);
87946a72 6573 }
aa3b1895 6574#endif
87946a72
DN
6575 return page;
6576}
6577
8d32ff84 6578static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
90254a65
DN
6579 unsigned long addr, pte_t ptent, union mc_target *target)
6580{
6581 struct page *page = NULL;
6582 struct page_cgroup *pc;
8d32ff84 6583 enum mc_target_type ret = MC_TARGET_NONE;
90254a65
DN
6584 swp_entry_t ent = { .val = 0 };
6585
6586 if (pte_present(ptent))
6587 page = mc_handle_present_pte(vma, addr, ptent);
6588 else if (is_swap_pte(ptent))
6589 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
87946a72
DN
6590 else if (pte_none(ptent) || pte_file(ptent))
6591 page = mc_handle_file_pte(vma, addr, ptent, &ent);
90254a65
DN
6592
6593 if (!page && !ent.val)
8d32ff84 6594 return ret;
02491447
DN
6595 if (page) {
6596 pc = lookup_page_cgroup(page);
6597 /*
6598 * Do only loose check w/o page_cgroup lock.
6599 * mem_cgroup_move_account() checks the pc is valid or not under
6600 * the lock.
6601 */
6602 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6603 ret = MC_TARGET_PAGE;
6604 if (target)
6605 target->page = page;
6606 }
6607 if (!ret || !target)
6608 put_page(page);
6609 }
90254a65
DN
6610 /* There is a swap entry and a page doesn't exist or isn't charged */
6611 if (ent.val && !ret &&
9fb4b7cc 6612 css_id(&mc.from->css) == lookup_swap_cgroup_id(ent)) {
7f0f1546
KH
6613 ret = MC_TARGET_SWAP;
6614 if (target)
6615 target->ent = ent;
4ffef5fe 6616 }
4ffef5fe
DN
6617 return ret;
6618}
6619
12724850
NH
6620#ifdef CONFIG_TRANSPARENT_HUGEPAGE
6621/*
 6622 * We don't consider swapping or file-mapped pages because THP does not
 6623 * support them for now.
 6624 * The caller must make sure that pmd_trans_huge(pmd) is true.
6625 */
6626static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6627 unsigned long addr, pmd_t pmd, union mc_target *target)
6628{
6629 struct page *page = NULL;
6630 struct page_cgroup *pc;
6631 enum mc_target_type ret = MC_TARGET_NONE;
6632
6633 page = pmd_page(pmd);
6634 VM_BUG_ON(!page || !PageHead(page));
6635 if (!move_anon())
6636 return ret;
6637 pc = lookup_page_cgroup(page);
6638 if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
6639 ret = MC_TARGET_PAGE;
6640 if (target) {
6641 get_page(page);
6642 target->page = page;
6643 }
6644 }
6645 return ret;
6646}
6647#else
6648static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6649 unsigned long addr, pmd_t pmd, union mc_target *target)
6650{
6651 return MC_TARGET_NONE;
6652}
6653#endif
6654
4ffef5fe
DN
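/*
 * Page table walk callback for the precharge pass: count how many charges
 * would be needed to move this range, without actually moving anything.
 * The count is accumulated in mc.precharge.
 */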
6655static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6656 unsigned long addr, unsigned long end,
6657 struct mm_walk *walk)
6658{
6659 struct vm_area_struct *vma = walk->private;
6660 pte_t *pte;
6661 spinlock_t *ptl;
6662
12724850
NH
6663 if (pmd_trans_huge_lock(pmd, vma) == 1) {
6664 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6665 mc.precharge += HPAGE_PMD_NR;
6666 spin_unlock(&vma->vm_mm->page_table_lock);
1a5a9906 6667 return 0;
12724850 6668 }
03319327 6669
45f83cef
AA
6670 if (pmd_trans_unstable(pmd))
6671 return 0;
4ffef5fe
DN
6672 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6673 for (; addr != end; pte++, addr += PAGE_SIZE)
8d32ff84 6674 if (get_mctgt_type(vma, addr, *pte, NULL))
4ffef5fe
DN
6675 mc.precharge++; /* increment precharge temporarily */
6676 pte_unmap_unlock(pte - 1, ptl);
6677 cond_resched();
6678
7dc74be0
DN
6679 return 0;
6680}
6681
4ffef5fe
DN
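/*
 * Walk all vmas of @mm (except hugetlb ones) and return the number of
 * pages whose charge would be moved.  mc.precharge is used as a scratch
 * counter and reset before returning.
 */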
6682static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6683{
6684 unsigned long precharge;
6685 struct vm_area_struct *vma;
6686
dfe076b0 6687 down_read(&mm->mmap_sem);
4ffef5fe
DN
6688 for (vma = mm->mmap; vma; vma = vma->vm_next) {
6689 struct mm_walk mem_cgroup_count_precharge_walk = {
6690 .pmd_entry = mem_cgroup_count_precharge_pte_range,
6691 .mm = mm,
6692 .private = vma,
6693 };
6694 if (is_vm_hugetlb_page(vma))
6695 continue;
4ffef5fe
DN
6696 walk_page_range(vma->vm_start, vma->vm_end,
6697 &mem_cgroup_count_precharge_walk);
6698 }
dfe076b0 6699 up_read(&mm->mmap_sem);
4ffef5fe
DN
6700
6701 precharge = mc.precharge;
6702 mc.precharge = 0;
6703
6704 return precharge;
6705}
6706
4ffef5fe
DN
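/*
 * Count the charges to be moved and precharge mc.to for all of them up
 * front, so that the actual move in mem_cgroup_move_charge() rarely has
 * to charge page by page.
 */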
6707static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6708{
dfe076b0
DN
6709 unsigned long precharge = mem_cgroup_count_precharge(mm);
6710
6711 VM_BUG_ON(mc.moving_task);
6712 mc.moving_task = current;
6713 return mem_cgroup_do_precharge(precharge);
4ffef5fe
DN
6714}
6715
dfe076b0
DN
6716/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6717static void __mem_cgroup_clear_mc(void)
4ffef5fe 6718{
2bd9bb20
KH
6719 struct mem_cgroup *from = mc.from;
6720 struct mem_cgroup *to = mc.to;
6721
4ffef5fe 6722 /* we must uncharge all the leftover precharges from mc.to */
854ffa8d
DN
6723 if (mc.precharge) {
6724 __mem_cgroup_cancel_charge(mc.to, mc.precharge);
6725 mc.precharge = 0;
6726 }
6727 /*
6728 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6729 * we must uncharge here.
6730 */
6731 if (mc.moved_charge) {
6732 __mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
6733 mc.moved_charge = 0;
4ffef5fe 6734 }
483c30b5
DN
6735 /* we must fixup refcnts and charges */
6736 if (mc.moved_swap) {
483c30b5
DN
6737 /* uncharge swap account from the old cgroup */
6738 if (!mem_cgroup_is_root(mc.from))
6739 res_counter_uncharge(&mc.from->memsw,
6740 PAGE_SIZE * mc.moved_swap);
6741 __mem_cgroup_put(mc.from, mc.moved_swap);
6742
6743 if (!mem_cgroup_is_root(mc.to)) {
6744 /*
6745 * we charged both to->res and to->memsw, so we should
6746 * uncharge to->res.
6747 */
6748 res_counter_uncharge(&mc.to->res,
6749 PAGE_SIZE * mc.moved_swap);
483c30b5
DN
6750 }
6751 /* we've already done mem_cgroup_get(mc.to) */
483c30b5
DN
6752 mc.moved_swap = 0;
6753 }
dfe076b0
DN
6754 memcg_oom_recover(from);
6755 memcg_oom_recover(to);
6756 wake_up_all(&mc.waitq);
6757}
6758
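/*
 * Tear down the move-charge state: clear moving_task, drop leftover
 * precharges, fix up the charges and references for already-moved swap
 * entries, and reset mc.from/mc.to so a new migration can start.
 */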
6759static void mem_cgroup_clear_mc(void)
6760{
6761 struct mem_cgroup *from = mc.from;
6762
6763 /*
6764 * we must clear moving_task before waking up waiters at the end of
6765 * task migration.
6766 */
6767 mc.moving_task = NULL;
6768 __mem_cgroup_clear_mc();
2bd9bb20 6769 spin_lock(&mc.lock);
4ffef5fe
DN
6770 mc.from = NULL;
6771 mc.to = NULL;
2bd9bb20 6772 spin_unlock(&mc.lock);
32047e2a 6773 mem_cgroup_end_move(from);
4ffef5fe
DN
6774}
6775
761b3ef5
LZ
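/*
 * cgroup ->can_attach handler: if move_charge_at_immigrate is set and the
 * task being moved owns its mm, record the source and destination memcgs
 * in mc and precharge the destination for every page that will be moved.
 */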
6776static int mem_cgroup_can_attach(struct cgroup *cgroup,
6777 struct cgroup_taskset *tset)
7dc74be0 6778{
2f7ee569 6779 struct task_struct *p = cgroup_taskset_first(tset);
7dc74be0 6780 int ret = 0;
c0ff4b85 6781 struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
ee5e8472 6782 unsigned long move_charge_at_immigrate;
7dc74be0 6783
ee5e8472
GC
6784 /*
 6785 * We are now committed to this value, whatever it is. Changes in this
 6786 * tunable will only affect upcoming migrations, not the current one.
 6787 * So we need to save it, and keep it going.
6788 */
6789 move_charge_at_immigrate = memcg->move_charge_at_immigrate;
6790 if (move_charge_at_immigrate) {
7dc74be0
DN
6791 struct mm_struct *mm;
6792 struct mem_cgroup *from = mem_cgroup_from_task(p);
6793
c0ff4b85 6794 VM_BUG_ON(from == memcg);
7dc74be0
DN
6795
6796 mm = get_task_mm(p);
6797 if (!mm)
6798 return 0;
7dc74be0 6799 /* We move charges only when we move the owner of the mm */
4ffef5fe
DN
6800 if (mm->owner == p) {
6801 VM_BUG_ON(mc.from);
6802 VM_BUG_ON(mc.to);
6803 VM_BUG_ON(mc.precharge);
854ffa8d 6804 VM_BUG_ON(mc.moved_charge);
483c30b5 6805 VM_BUG_ON(mc.moved_swap);
32047e2a 6806 mem_cgroup_start_move(from);
2bd9bb20 6807 spin_lock(&mc.lock);
4ffef5fe 6808 mc.from = from;
c0ff4b85 6809 mc.to = memcg;
ee5e8472 6810 mc.immigrate_flags = move_charge_at_immigrate;
2bd9bb20 6811 spin_unlock(&mc.lock);
dfe076b0 6812 /* We set mc.moving_task later */
4ffef5fe
DN
6813
6814 ret = mem_cgroup_precharge_mc(mm);
6815 if (ret)
6816 mem_cgroup_clear_mc();
dfe076b0
DN
6817 }
6818 mmput(mm);
7dc74be0
DN
6819 }
6820 return ret;
6821}
6822
761b3ef5
LZ
6823static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
6824 struct cgroup_taskset *tset)
7dc74be0 6825{
4ffef5fe 6826 mem_cgroup_clear_mc();
7dc74be0
DN
6827}
6828
4ffef5fe
DN
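/*
 * Page table walk callback for the move pass: for each pte (or trans-huge
 * pmd) that get_mctgt_type() says belongs to mc.from, move the charge to
 * mc.to, consuming one precharge per base page moved.
 */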
6829static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6830 unsigned long addr, unsigned long end,
6831 struct mm_walk *walk)
7dc74be0 6832{
4ffef5fe
DN
6833 int ret = 0;
6834 struct vm_area_struct *vma = walk->private;
6835 pte_t *pte;
6836 spinlock_t *ptl;
12724850
NH
6837 enum mc_target_type target_type;
6838 union mc_target target;
6839 struct page *page;
6840 struct page_cgroup *pc;
4ffef5fe 6841
12724850
NH
6842 /*
 6843 * We don't take compound_lock() here, but no race with a splitting thp
 6844 * can happen because:
 6845 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not
 6846 * under splitting, which means there is no concurrent thp split;
 6847 * - if another thread enters split_huge_page() just after we
 6848 * entered this if-block, it must wait for the page table lock
 6849 * to be released in __split_huge_page_splitting(), where the main
 6850 * part of the thp split has not been executed yet.
6851 */
6852 if (pmd_trans_huge_lock(pmd, vma) == 1) {
62ade86a 6853 if (mc.precharge < HPAGE_PMD_NR) {
12724850
NH
6854 spin_unlock(&vma->vm_mm->page_table_lock);
6855 return 0;
6856 }
6857 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6858 if (target_type == MC_TARGET_PAGE) {
6859 page = target.page;
6860 if (!isolate_lru_page(page)) {
6861 pc = lookup_page_cgroup(page);
6862 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
2f3479b1 6863 pc, mc.from, mc.to)) {
12724850
NH
6864 mc.precharge -= HPAGE_PMD_NR;
6865 mc.moved_charge += HPAGE_PMD_NR;
6866 }
6867 putback_lru_page(page);
6868 }
6869 put_page(page);
6870 }
6871 spin_unlock(&vma->vm_mm->page_table_lock);
1a5a9906 6872 return 0;
12724850
NH
6873 }
6874
45f83cef
AA
6875 if (pmd_trans_unstable(pmd))
6876 return 0;
4ffef5fe
DN
6877retry:
6878 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6879 for (; addr != end; addr += PAGE_SIZE) {
6880 pte_t ptent = *(pte++);
02491447 6881 swp_entry_t ent;
4ffef5fe
DN
6882
6883 if (!mc.precharge)
6884 break;
6885
8d32ff84 6886 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4ffef5fe
DN
6887 case MC_TARGET_PAGE:
6888 page = target.page;
6889 if (isolate_lru_page(page))
6890 goto put;
6891 pc = lookup_page_cgroup(page);
7ec99d62 6892 if (!mem_cgroup_move_account(page, 1, pc,
2f3479b1 6893 mc.from, mc.to)) {
4ffef5fe 6894 mc.precharge--;
854ffa8d
DN
6895 /* we uncharge from mc.from later. */
6896 mc.moved_charge++;
4ffef5fe
DN
6897 }
6898 putback_lru_page(page);
8d32ff84 6899put: /* get_mctgt_type() gets the page */
4ffef5fe
DN
6900 put_page(page);
6901 break;
02491447
DN
6902 case MC_TARGET_SWAP:
6903 ent = target.ent;
e91cbb42 6904 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
02491447 6905 mc.precharge--;
483c30b5
DN
6906 /* we fixup refcnts and charges later. */
6907 mc.moved_swap++;
6908 }
02491447 6909 break;
4ffef5fe
DN
6910 default:
6911 break;
6912 }
6913 }
6914 pte_unmap_unlock(pte - 1, ptl);
6915 cond_resched();
6916
6917 if (addr != end) {
6918 /*
6919 * We have consumed all precharges we got in can_attach().
 6920 * We try to charge one page at a time, but don't do any additional
 6921 * charges to mc.to once a charge has failed during the attach()
 6922 * phase.
6923 */
854ffa8d 6924 ret = mem_cgroup_do_precharge(1);
4ffef5fe
DN
6925 if (!ret)
6926 goto retry;
6927 }
6928
6929 return ret;
6930}
6931
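/*
 * Walk all vmas of @mm and move charges from mc.from to mc.to.  If the
 * mmap_sem cannot be taken without blocking, cancel the remaining
 * precharges and retry so that waiters are not held up.
 */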
6932static void mem_cgroup_move_charge(struct mm_struct *mm)
6933{
6934 struct vm_area_struct *vma;
6935
6936 lru_add_drain_all();
dfe076b0
DN
6937retry:
6938 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
6939 /*
 6940 * Someone holding the mmap_sem might be waiting in
6941 * waitq. So we cancel all extra charges, wake up all waiters,
6942 * and retry. Because we cancel precharges, we might not be able
6943 * to move enough charges, but moving charge is a best-effort
6944 * feature anyway, so it wouldn't be a big problem.
6945 */
6946 __mem_cgroup_clear_mc();
6947 cond_resched();
6948 goto retry;
6949 }
4ffef5fe
DN
6950 for (vma = mm->mmap; vma; vma = vma->vm_next) {
6951 int ret;
6952 struct mm_walk mem_cgroup_move_charge_walk = {
6953 .pmd_entry = mem_cgroup_move_charge_pte_range,
6954 .mm = mm,
6955 .private = vma,
6956 };
6957 if (is_vm_hugetlb_page(vma))
6958 continue;
4ffef5fe
DN
6959 ret = walk_page_range(vma->vm_start, vma->vm_end,
6960 &mem_cgroup_move_charge_walk);
6961 if (ret)
6962 /*
 6963 * A non-zero return means we have consumed all precharges and
 6964 * failed to make an additional charge. Just abandon here.
6965 */
6966 break;
6967 }
dfe076b0 6968 up_read(&mm->mmap_sem);
7dc74be0
DN
6969}
6970
761b3ef5
LZ
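/*
 * cgroup ->attach handler: perform the charge move prepared in
 * mem_cgroup_can_attach() and then clear the move-charge state.
 */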
6971static void mem_cgroup_move_task(struct cgroup *cont,
6972 struct cgroup_taskset *tset)
67e465a7 6973{
2f7ee569 6974 struct task_struct *p = cgroup_taskset_first(tset);
a433658c 6975 struct mm_struct *mm = get_task_mm(p);
dfe076b0 6976
dfe076b0 6977 if (mm) {
a433658c
KM
6978 if (mc.to)
6979 mem_cgroup_move_charge(mm);
dfe076b0
DN
6980 mmput(mm);
6981 }
a433658c
KM
6982 if (mc.to)
6983 mem_cgroup_clear_mc();
67e465a7 6984}
5cfb80a7 6985#else /* !CONFIG_MMU */
761b3ef5
LZ
6986static int mem_cgroup_can_attach(struct cgroup *cgroup,
6987 struct cgroup_taskset *tset)
5cfb80a7
DN
6988{
6989 return 0;
6990}
761b3ef5
LZ
6991static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
6992 struct cgroup_taskset *tset)
5cfb80a7
DN
6993{
6994}
761b3ef5
LZ
6995static void mem_cgroup_move_task(struct cgroup *cont,
6996 struct cgroup_taskset *tset)
5cfb80a7
DN
6997{
6998}
6999#endif
67e465a7 7000
f00baae7
TH
7001/*
 7002 * Cgroup retains root cgroups across [un]mount cycles, making it necessary
 7003 * to verify the sane_behavior flag on each mount attempt.
7004 */
7005static void mem_cgroup_bind(struct cgroup *root)
7006{
7007 /*
7008 * use_hierarchy is forced with sane_behavior. cgroup core
7009 * guarantees that @root doesn't have any children, so turning it
7010 * on for the root memcg is enough.
7011 */
7012 if (cgroup_sane_behavior(root))
7013 mem_cgroup_from_cont(root)->use_hierarchy = true;
7014}
7015
8cdea7c0
BS
7016struct cgroup_subsys mem_cgroup_subsys = {
7017 .name = "memory",
7018 .subsys_id = mem_cgroup_subsys_id,
92fb9748 7019 .css_alloc = mem_cgroup_css_alloc,
d142e3e6 7020 .css_online = mem_cgroup_css_online,
92fb9748
TH
7021 .css_offline = mem_cgroup_css_offline,
7022 .css_free = mem_cgroup_css_free,
7dc74be0
DN
7023 .can_attach = mem_cgroup_can_attach,
7024 .cancel_attach = mem_cgroup_cancel_attach,
67e465a7 7025 .attach = mem_cgroup_move_task,
f00baae7 7026 .bind = mem_cgroup_bind,
6bc10349 7027 .base_cftypes = mem_cgroup_files,
6d12e2d8 7028 .early_init = 0,
04046e1a 7029 .use_id = 1,
8cdea7c0 7030};
c077719b 7031
c255a458 7032#ifdef CONFIG_MEMCG_SWAP
a42c390c
MH
7033static int __init enable_swap_account(char *s)
7034{
7035 /* consider enabled if no parameter or 1 is given */
a2c8990a 7036 if (!strcmp(s, "1"))
a42c390c 7037 really_do_swap_account = 1;
a2c8990a 7038 else if (!strcmp(s, "0"))
a42c390c
MH
7039 really_do_swap_account = 0;
7040 return 1;
7041}
a2c8990a 7042__setup("swapaccount=", enable_swap_account);
c077719b 7043
2d11085e
MH
7044static void __init memsw_file_init(void)
7045{
6acc8b02
MH
7046 WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files));
7047}
7048
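/*
 * Enable swap accounting and register the memsw control files when the
 * memory controller is enabled and swapaccount= was not turned off.
 */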
7049static void __init enable_swap_cgroup(void)
7050{
7051 if (!mem_cgroup_disabled() && really_do_swap_account) {
7052 do_swap_account = 1;
7053 memsw_file_init();
7054 }
2d11085e 7055}
6acc8b02 7056
2d11085e 7057#else
6acc8b02 7058static void __init enable_swap_cgroup(void)
2d11085e
MH
7059{
7060}
c077719b 7061#endif
2d11085e
MH
7062
7063/*
1081312f
MH
7064 * subsys_initcall() for memory controller.
7065 *
7066 * Some parts like hotcpu_notifier() have to be initialized from this context
7067 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
7068 * everything that doesn't depend on a specific mem_cgroup structure should
7069 * be initialized from here.
2d11085e
MH
7070 */
7071static int __init mem_cgroup_init(void)
7072{
7073 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
6acc8b02 7074 enable_swap_cgroup();
8787a1df 7075 mem_cgroup_soft_limit_tree_init();
e4777496 7076 memcg_stock_init();
2d11085e
MH
7077 return 0;
7078}
7079subsys_initcall(mem_cgroup_init);