mm/memcontrol.c
1 /* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
13 * Kernel Memory Controller
14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
15 * Authors: Glauber Costa and Suleiman Souhlal
16 *
17 * Native page reclaim
18 * Charge lifetime sanitation
19 * Lockless page tracking & accounting
20 * Unified hierarchy configuration model
21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
22 *
23 * This program is free software; you can redistribute it and/or modify
24 * it under the terms of the GNU General Public License as published by
25 * the Free Software Foundation; either version 2 of the License, or
26 * (at your option) any later version.
27 *
28 * This program is distributed in the hope that it will be useful,
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 * GNU General Public License for more details.
32 */
33
34 #include <linux/page_counter.h>
35 #include <linux/memcontrol.h>
36 #include <linux/cgroup.h>
37 #include <linux/mm.h>
38 #include <linux/hugetlb.h>
39 #include <linux/pagemap.h>
40 #include <linux/smp.h>
41 #include <linux/page-flags.h>
42 #include <linux/backing-dev.h>
43 #include <linux/bit_spinlock.h>
44 #include <linux/rcupdate.h>
45 #include <linux/limits.h>
46 #include <linux/export.h>
47 #include <linux/mutex.h>
48 #include <linux/rbtree.h>
49 #include <linux/slab.h>
50 #include <linux/swap.h>
51 #include <linux/swapops.h>
52 #include <linux/spinlock.h>
53 #include <linux/eventfd.h>
54 #include <linux/poll.h>
55 #include <linux/sort.h>
56 #include <linux/fs.h>
57 #include <linux/seq_file.h>
58 #include <linux/vmpressure.h>
59 #include <linux/mm_inline.h>
60 #include <linux/swap_cgroup.h>
61 #include <linux/cpu.h>
62 #include <linux/oom.h>
63 #include <linux/lockdep.h>
64 #include <linux/file.h>
65 #include <linux/tracehook.h>
66 #include "internal.h"
67 #include <net/sock.h>
68 #include <net/ip.h>
69 #include "slab.h"
70
71 #include <asm/uaccess.h>
72
73 #include <trace/events/vmscan.h>
74
75 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
76 EXPORT_SYMBOL(memory_cgrp_subsys);
77
78 struct mem_cgroup *root_mem_cgroup __read_mostly;
79
80 #define MEM_CGROUP_RECLAIM_RETRIES 5
81
82 /* Socket memory accounting disabled? */
83 static bool cgroup_memory_nosocket;
84
85 /* Kernel memory accounting disabled? */
86 static bool cgroup_memory_nokmem;
87
88 /* Whether the swap controller is active */
89 #ifdef CONFIG_MEMCG_SWAP
90 int do_swap_account __read_mostly;
91 #else
92 #define do_swap_account 0
93 #endif
94
95 /* Whether legacy memory+swap accounting is active */
96 static bool do_memsw_account(void)
97 {
98 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
99 }
100
101 static const char * const mem_cgroup_stat_names[] = {
102 "cache",
103 "rss",
104 "rss_huge",
105 "mapped_file",
106 "dirty",
107 "writeback",
108 "swap",
109 };
110
111 static const char * const mem_cgroup_events_names[] = {
112 "pgpgin",
113 "pgpgout",
114 "pgfault",
115 "pgmajfault",
116 };
117
118 static const char * const mem_cgroup_lru_names[] = {
119 "inactive_anon",
120 "active_anon",
121 "inactive_file",
122 "active_file",
123 "unevictable",
124 };
125
126 #define THRESHOLDS_EVENTS_TARGET 128
127 #define SOFTLIMIT_EVENTS_TARGET 1024
128 #define NUMAINFO_EVENTS_TARGET 1024
129
130 /*
131 * Cgroups above their limits are maintained in an RB-tree, independent of
132 * their hierarchy representation
133 */
134
135 struct mem_cgroup_tree_per_zone {
136 struct rb_root rb_root;
137 spinlock_t lock;
138 };
139
140 struct mem_cgroup_tree_per_node {
141 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
142 };
143
144 struct mem_cgroup_tree {
145 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
146 };
147
148 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
149
150 /* for OOM */
151 struct mem_cgroup_eventfd_list {
152 struct list_head list;
153 struct eventfd_ctx *eventfd;
154 };
155
156 /*
157 * cgroup_event represents events which userspace wants to receive.
158 */
159 struct mem_cgroup_event {
160 /*
161 * memcg which the event belongs to.
162 */
163 struct mem_cgroup *memcg;
164 /*
165 * eventfd to signal userspace about the event.
166 */
167 struct eventfd_ctx *eventfd;
168 /*
169 * Each of these is stored in a list by the cgroup.
170 */
171 struct list_head list;
172 /*
173 * register_event() callback will be used to add new userspace
174 * waiter for changes related to this event. Use eventfd_signal()
175 * on eventfd to send notification to userspace.
176 */
177 int (*register_event)(struct mem_cgroup *memcg,
178 struct eventfd_ctx *eventfd, const char *args);
179 /*
180 * unregister_event() callback will be called when userspace closes
181 * the eventfd or on cgroup removal. This callback must be set
182 * if you want to provide notification functionality.
183 */
184 void (*unregister_event)(struct mem_cgroup *memcg,
185 struct eventfd_ctx *eventfd);
186 /*
187 * All fields below needed to unregister event when
188 * userspace closes eventfd.
189 */
190 poll_table pt;
191 wait_queue_head_t *wqh;
192 wait_queue_t wait;
193 struct work_struct remove;
194 };
195
196 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
197 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
198
199 /* Stuffs for move charges at task migration. */
200 /*
201 * Types of charges to be moved.
202 */
203 #define MOVE_ANON 0x1U
204 #define MOVE_FILE 0x2U
205 #define MOVE_MASK (MOVE_ANON | MOVE_FILE)
206
207 /* "mc" and its members are protected by cgroup_mutex */
208 static struct move_charge_struct {
209 spinlock_t lock; /* for from, to */
210 struct mem_cgroup *from;
211 struct mem_cgroup *to;
212 unsigned long flags;
213 unsigned long precharge;
214 unsigned long moved_charge;
215 unsigned long moved_swap;
216 struct task_struct *moving_task; /* a task moving charges */
217 wait_queue_head_t waitq; /* a waitq for other context */
218 } mc = {
219 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
220 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
221 };
222
223 /*
224 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
225 * limit reclaim to prevent infinite loops, if they ever occur.
226 */
227 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
228 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
229
230 enum charge_type {
231 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
232 MEM_CGROUP_CHARGE_TYPE_ANON,
233 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
234 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
235 NR_CHARGE_TYPE,
236 };
237
238 /* for encoding cft->private value on file */
239 enum res_type {
240 _MEM,
241 _MEMSWAP,
242 _OOM_TYPE,
243 _KMEM,
244 _TCP,
245 };
246
247 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
248 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
249 #define MEMFILE_ATTR(val) ((val) & 0xffff)
250 /* Used for OOM notifier */
251 #define OOM_CONTROL (0)
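/*
 * Example of the encoding above: for the memory.oom_control file,
 * cft->private is built as MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL);
 * MEMFILE_TYPE() then recovers _OOM_TYPE from the upper 16 bits and
 * MEMFILE_ATTR() recovers OOM_CONTROL from the lower 16 bits.
 */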
252
253 /* Some nice accessors for the vmpressure. */
254 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
255 {
256 if (!memcg)
257 memcg = root_mem_cgroup;
258 return &memcg->vmpressure;
259 }
260
261 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
262 {
263 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
264 }
265
266 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
267 {
268 return (memcg == root_mem_cgroup);
269 }
270
271 #ifndef CONFIG_SLOB
272 /*
273 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
274 * The main reason for not using cgroup id for this:
275 * this works better in sparse environments, where we have a lot of memcgs,
276 * but only a few kmem-limited. Or also, if we have, for instance, 200
277 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
278 * 200 entry array for that.
279 *
280 * The current size of the caches array is stored in memcg_nr_cache_ids. It
281 * will double each time we have to increase it.
282 */
283 static DEFINE_IDA(memcg_cache_ida);
284 int memcg_nr_cache_ids;
285
286 /* Protects memcg_nr_cache_ids */
287 static DECLARE_RWSEM(memcg_cache_ids_sem);
288
289 void memcg_get_cache_ids(void)
290 {
291 down_read(&memcg_cache_ids_sem);
292 }
293
294 void memcg_put_cache_ids(void)
295 {
296 up_read(&memcg_cache_ids_sem);
297 }
298
299 /*
300 * MIN_SIZE is different from 1, because we would like to avoid going through
301 * the alloc/free process all the time. In a small machine, 4 kmem-limited
302 * cgroups is a reasonable guess. In the future, it could be a parameter or
303 * tunable, but that is strictly not necessary.
304 *
305 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
306 * this constant directly from cgroup, but it is understandable that this is
307 * better kept as an internal representation in cgroup.c. In any case, the
308 * cgrp_id space is not getting any smaller, and we don't have to necessarily
309 * increase ours as well if it increases.
310 */
311 #define MEMCG_CACHES_MIN_SIZE 4
312 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
313
314 /*
315 * A lot of the calls to the cache allocation functions are expected to be
316 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
317 * conditional on this static branch, we'll have to allow modules that do
318 * kmem_cache_alloc and the like to see this symbol as well
319 */
320 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
321 EXPORT_SYMBOL(memcg_kmem_enabled_key);
322
323 #endif /* !CONFIG_SLOB */
324
325 static struct mem_cgroup_per_zone *
326 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
327 {
328 int nid = zone_to_nid(zone);
329 int zid = zone_idx(zone);
330
331 return &memcg->nodeinfo[nid]->zoneinfo[zid];
332 }
333
334 /**
335 * mem_cgroup_css_from_page - css of the memcg associated with a page
336 * @page: page of interest
337 *
338 * If memcg is bound to the default hierarchy, css of the memcg associated
339 * with @page is returned. The returned css remains associated with @page
340 * until it is released.
341 *
342 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
343 * is returned.
344 */
345 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
346 {
347 struct mem_cgroup *memcg;
348
349 memcg = page->mem_cgroup;
350
351 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
352 memcg = root_mem_cgroup;
353
354 return &memcg->css;
355 }
356
357 /**
358 * page_cgroup_ino - return inode number of the memcg a page is charged to
359 * @page: the page
360 *
361 * Look up the closest online ancestor of the memory cgroup @page is charged to
362 * and return its inode number or 0 if @page is not charged to any cgroup. It
363 * is safe to call this function without holding a reference to @page.
364 *
365 * Note, this function is inherently racy, because there is nothing to prevent
366 * the cgroup inode from getting torn down and potentially reallocated a moment
367 * after page_cgroup_ino() returns, so it only should be used by callers that
368 * do not care (such as procfs interfaces).
369 */
370 ino_t page_cgroup_ino(struct page *page)
371 {
372 struct mem_cgroup *memcg;
373 unsigned long ino = 0;
374
375 rcu_read_lock();
376 memcg = READ_ONCE(page->mem_cgroup);
377 while (memcg && !(memcg->css.flags & CSS_ONLINE))
378 memcg = parent_mem_cgroup(memcg);
379 if (memcg)
380 ino = cgroup_ino(memcg->css.cgroup);
381 rcu_read_unlock();
382 return ino;
383 }
384
385 static struct mem_cgroup_per_zone *
386 mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
387 {
388 int nid = page_to_nid(page);
389 int zid = page_zonenum(page);
390
391 return &memcg->nodeinfo[nid]->zoneinfo[zid];
392 }
393
394 static struct mem_cgroup_tree_per_zone *
395 soft_limit_tree_node_zone(int nid, int zid)
396 {
397 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
398 }
399
400 static struct mem_cgroup_tree_per_zone *
401 soft_limit_tree_from_page(struct page *page)
402 {
403 int nid = page_to_nid(page);
404 int zid = page_zonenum(page);
405
406 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
407 }
408
409 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
410 struct mem_cgroup_tree_per_zone *mctz,
411 unsigned long new_usage_in_excess)
412 {
413 struct rb_node **p = &mctz->rb_root.rb_node;
414 struct rb_node *parent = NULL;
415 struct mem_cgroup_per_zone *mz_node;
416
417 if (mz->on_tree)
418 return;
419
420 mz->usage_in_excess = new_usage_in_excess;
421 if (!mz->usage_in_excess)
422 return;
423 while (*p) {
424 parent = *p;
425 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
426 tree_node);
427 if (mz->usage_in_excess < mz_node->usage_in_excess)
428 p = &(*p)->rb_left;
429 /*
430 * We can't avoid mem cgroups that are over their soft
431 * limit by the same amount
432 */
433 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
434 p = &(*p)->rb_right;
435 }
436 rb_link_node(&mz->tree_node, parent, p);
437 rb_insert_color(&mz->tree_node, &mctz->rb_root);
438 mz->on_tree = true;
439 }
440
441 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
442 struct mem_cgroup_tree_per_zone *mctz)
443 {
444 if (!mz->on_tree)
445 return;
446 rb_erase(&mz->tree_node, &mctz->rb_root);
447 mz->on_tree = false;
448 }
449
450 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
451 struct mem_cgroup_tree_per_zone *mctz)
452 {
453 unsigned long flags;
454
455 spin_lock_irqsave(&mctz->lock, flags);
456 __mem_cgroup_remove_exceeded(mz, mctz);
457 spin_unlock_irqrestore(&mctz->lock, flags);
458 }
459
460 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
461 {
462 unsigned long nr_pages = page_counter_read(&memcg->memory);
463 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
464 unsigned long excess = 0;
465
466 if (nr_pages > soft_limit)
467 excess = nr_pages - soft_limit;
468
469 return excess;
470 }
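/*
 * Example: with memcg->memory at 1536 pages and a soft_limit of 1024
 * pages, soft_limit_excess() returns 512; at or below the soft limit
 * it returns 0.
 */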
471
472 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
473 {
474 unsigned long excess;
475 struct mem_cgroup_per_zone *mz;
476 struct mem_cgroup_tree_per_zone *mctz;
477
478 mctz = soft_limit_tree_from_page(page);
479 /*
480 * Necessary to update all ancestors when hierarchy is used,
481 * because their event counter is not touched.
482 */
483 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
484 mz = mem_cgroup_page_zoneinfo(memcg, page);
485 excess = soft_limit_excess(memcg);
486 /*
487 * We have to update the tree if mz is on RB-tree or
488 * mem is over its softlimit.
489 */
490 if (excess || mz->on_tree) {
491 unsigned long flags;
492
493 spin_lock_irqsave(&mctz->lock, flags);
494 /* if on-tree, remove it */
495 if (mz->on_tree)
496 __mem_cgroup_remove_exceeded(mz, mctz);
497 /*
498 * Insert again. mz->usage_in_excess will be updated.
499 * If excess is 0, no tree ops.
500 */
501 __mem_cgroup_insert_exceeded(mz, mctz, excess);
502 spin_unlock_irqrestore(&mctz->lock, flags);
503 }
504 }
505 }
506
507 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
508 {
509 struct mem_cgroup_tree_per_zone *mctz;
510 struct mem_cgroup_per_zone *mz;
511 int nid, zid;
512
513 for_each_node(nid) {
514 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
515 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
516 mctz = soft_limit_tree_node_zone(nid, zid);
517 mem_cgroup_remove_exceeded(mz, mctz);
518 }
519 }
520 }
521
522 static struct mem_cgroup_per_zone *
523 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
524 {
525 struct rb_node *rightmost = NULL;
526 struct mem_cgroup_per_zone *mz;
527
528 retry:
529 mz = NULL;
530 rightmost = rb_last(&mctz->rb_root);
531 if (!rightmost)
532 goto done; /* Nothing to reclaim from */
533
534 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
535 /*
536 * Remove the node now but someone else can add it back,
537 * we will add it back at the end of reclaim to its correct
538 * position in the tree.
539 */
540 __mem_cgroup_remove_exceeded(mz, mctz);
541 if (!soft_limit_excess(mz->memcg) ||
542 !css_tryget_online(&mz->memcg->css))
543 goto retry;
544 done:
545 return mz;
546 }
547
548 static struct mem_cgroup_per_zone *
549 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
550 {
551 struct mem_cgroup_per_zone *mz;
552
553 spin_lock_irq(&mctz->lock);
554 mz = __mem_cgroup_largest_soft_limit_node(mctz);
555 spin_unlock_irq(&mctz->lock);
556 return mz;
557 }
558
559 /*
560 * Return the page count for a single (non-recursive) @memcg.
561 *
562 * Implementation Note: reading percpu statistics for memcg.
563 *
564 * Both vmstat[] and percpu_counter have thresholds and do periodic
565 * synchronization to implement a "quick" read. There is a trade-off between
566 * reading cost and precision of the value. So we may get a chance to implement
567 * a periodic synchronization of the counter in memcg's counter.
568 *
569 * But this _read() function is used for the user interface now. The user accounts
570 * memory usage by memory cgroup and _always_ requires an exact value, because
571 * they account memory. Even if we provided a quick-and-fuzzy read, we would still
572 * have to visit all online cpus and compute the sum. So, for now, unnecessary
573 * synchronization is not implemented. (It is just implemented for cpu hotplug.)
574 *
575 * If there are kernel-internal actions which can make use of a not-exact
576 * value, and reading all cpu values becomes a performance bottleneck in some
577 * common workload, thresholds and synchronization as in vmstat[] should be
578 * implemented.
579 */
580 static unsigned long
581 mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
582 {
583 long val = 0;
584 int cpu;
585
586 /* Per-cpu values can be negative, use a signed accumulator */
587 for_each_possible_cpu(cpu)
588 val += per_cpu(memcg->stat->count[idx], cpu);
589 /*
590 * Summing races with updates, so val may be negative. Avoid exposing
591 * transient negative values.
592 */
593 if (val < 0)
594 val = 0;
595 return val;
596 }
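/*
 * Example: if cpu0 holds +10 and cpu1 holds -3 for a counter, the sum
 * is 7; if a racing update briefly makes the sum negative, 0 is
 * reported instead of the transient negative value.
 */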
597
598 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
599 enum mem_cgroup_events_index idx)
600 {
601 unsigned long val = 0;
602 int cpu;
603
604 for_each_possible_cpu(cpu)
605 val += per_cpu(memcg->stat->events[idx], cpu);
606 return val;
607 }
608
609 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
610 struct page *page,
611 bool compound, int nr_pages)
612 {
613 /*
614 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
615 * counted as CACHE even if it's on ANON LRU.
616 */
617 if (PageAnon(page))
618 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
619 nr_pages);
620 else
621 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
622 nr_pages);
623
624 if (compound) {
625 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
626 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
627 nr_pages);
628 }
629
630 /* pagein of a big page is an event. So, ignore page size */
631 if (nr_pages > 0)
632 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
633 else {
634 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
635 nr_pages = -nr_pages; /* for event */
636 }
637
638 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
639 }
640
641 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
642 int nid, unsigned int lru_mask)
643 {
644 unsigned long nr = 0;
645 int zid;
646
647 VM_BUG_ON((unsigned)nid >= nr_node_ids);
648
649 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
650 struct mem_cgroup_per_zone *mz;
651 enum lru_list lru;
652
653 for_each_lru(lru) {
654 if (!(BIT(lru) & lru_mask))
655 continue;
656 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
657 nr += mz->lru_size[lru];
658 }
659 }
660 return nr;
661 }
662
663 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
664 unsigned int lru_mask)
665 {
666 unsigned long nr = 0;
667 int nid;
668
669 for_each_node_state(nid, N_MEMORY)
670 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
671 return nr;
672 }
673
674 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
675 enum mem_cgroup_events_target target)
676 {
677 unsigned long val, next;
678
679 val = __this_cpu_read(memcg->stat->nr_page_events);
680 next = __this_cpu_read(memcg->stat->targets[target]);
681 /* from time_after() in jiffies.h */
682 if ((long)next - (long)val < 0) {
683 switch (target) {
684 case MEM_CGROUP_TARGET_THRESH:
685 next = val + THRESHOLDS_EVENTS_TARGET;
686 break;
687 case MEM_CGROUP_TARGET_SOFTLIMIT:
688 next = val + SOFTLIMIT_EVENTS_TARGET;
689 break;
690 case MEM_CGROUP_TARGET_NUMAINFO:
691 next = val + NUMAINFO_EVENTS_TARGET;
692 break;
693 default:
694 break;
695 }
696 __this_cpu_write(memcg->stat->targets[target], next);
697 return true;
698 }
699 return false;
700 }
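/*
 * Example: with THRESHOLDS_EVENTS_TARGET == 128, this returns true for
 * MEM_CGROUP_TARGET_THRESH roughly once per 128 page events on a cpu,
 * and each time it fires the per-cpu target is advanced to
 * nr_page_events + 128.
 */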
701
702 /*
703 * Check events in order.
704 *
705 */
706 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
707 {
708 /* threshold event is triggered in finer grain than soft limit */
709 if (unlikely(mem_cgroup_event_ratelimit(memcg,
710 MEM_CGROUP_TARGET_THRESH))) {
711 bool do_softlimit;
712 bool do_numainfo __maybe_unused;
713
714 do_softlimit = mem_cgroup_event_ratelimit(memcg,
715 MEM_CGROUP_TARGET_SOFTLIMIT);
716 #if MAX_NUMNODES > 1
717 do_numainfo = mem_cgroup_event_ratelimit(memcg,
718 MEM_CGROUP_TARGET_NUMAINFO);
719 #endif
720 mem_cgroup_threshold(memcg);
721 if (unlikely(do_softlimit))
722 mem_cgroup_update_tree(memcg, page);
723 #if MAX_NUMNODES > 1
724 if (unlikely(do_numainfo))
725 atomic_inc(&memcg->numainfo_events);
726 #endif
727 }
728 }
729
730 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
731 {
732 /*
733 * mm_update_next_owner() may clear mm->owner to NULL
734 * if it races with swapoff, page migration, etc.
735 * So this can be called with p == NULL.
736 */
737 if (unlikely(!p))
738 return NULL;
739
740 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
741 }
742 EXPORT_SYMBOL(mem_cgroup_from_task);
743
744 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
745 {
746 struct mem_cgroup *memcg = NULL;
747
748 rcu_read_lock();
749 do {
750 /*
751 * Page cache insertions can happen without an
752 * actual mm context, e.g. during disk probing
753 * on boot, loopback IO, acct() writes etc.
754 */
755 if (unlikely(!mm))
756 memcg = root_mem_cgroup;
757 else {
758 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
759 if (unlikely(!memcg))
760 memcg = root_mem_cgroup;
761 }
762 } while (!css_tryget_online(&memcg->css));
763 rcu_read_unlock();
764 return memcg;
765 }
766
767 /**
768 * mem_cgroup_iter - iterate over memory cgroup hierarchy
769 * @root: hierarchy root
770 * @prev: previously returned memcg, NULL on first invocation
771 * @reclaim: cookie for shared reclaim walks, NULL for full walks
772 *
773 * Returns references to children of the hierarchy below @root, or
774 * @root itself, or %NULL after a full round-trip.
775 *
776 * Caller must pass the return value in @prev on subsequent
777 * invocations for reference counting, or use mem_cgroup_iter_break()
778 * to cancel a hierarchy walk before the round-trip is complete.
779 *
780 * Reclaimers can specify a zone and a priority level in @reclaim to
781 * divide up the memcgs in the hierarchy among all concurrent
782 * reclaimers operating on the same zone and priority.
783 */
784 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
785 struct mem_cgroup *prev,
786 struct mem_cgroup_reclaim_cookie *reclaim)
787 {
788 struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
789 struct cgroup_subsys_state *css = NULL;
790 struct mem_cgroup *memcg = NULL;
791 struct mem_cgroup *pos = NULL;
792
793 if (mem_cgroup_disabled())
794 return NULL;
795
796 if (!root)
797 root = root_mem_cgroup;
798
799 if (prev && !reclaim)
800 pos = prev;
801
802 if (!root->use_hierarchy && root != root_mem_cgroup) {
803 if (prev)
804 goto out;
805 return root;
806 }
807
808 rcu_read_lock();
809
810 if (reclaim) {
811 struct mem_cgroup_per_zone *mz;
812
813 mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
814 iter = &mz->iter[reclaim->priority];
815
816 if (prev && reclaim->generation != iter->generation)
817 goto out_unlock;
818
819 while (1) {
820 pos = READ_ONCE(iter->position);
821 if (!pos || css_tryget(&pos->css))
822 break;
823 /*
824 * css reference reached zero, so iter->position will
825 * be cleared by ->css_released. However, we should not
826 * rely on this happening soon, because ->css_released
827 * is called from a work queue, and by busy-waiting we
828 * might block it. So we clear iter->position right
829 * away.
830 */
831 (void)cmpxchg(&iter->position, pos, NULL);
832 }
833 }
834
835 if (pos)
836 css = &pos->css;
837
838 for (;;) {
839 css = css_next_descendant_pre(css, &root->css);
840 if (!css) {
841 /*
842 * Reclaimers share the hierarchy walk, and a
843 * new one might jump in right at the end of
844 * the hierarchy - make sure they see at least
845 * one group and restart from the beginning.
846 */
847 if (!prev)
848 continue;
849 break;
850 }
851
852 /*
853 * Verify the css and acquire a reference. The root
854 * is provided by the caller, so we know it's alive
855 * and kicking, and don't take an extra reference.
856 */
857 memcg = mem_cgroup_from_css(css);
858
859 if (css == &root->css)
860 break;
861
862 if (css_tryget(css))
863 break;
864
865 memcg = NULL;
866 }
867
868 if (reclaim) {
869 /*
870 * The position could have already been updated by a competing
871 * thread, so check that the value hasn't changed since we read
872 * it to avoid reclaiming from the same cgroup twice.
873 */
874 (void)cmpxchg(&iter->position, pos, memcg);
875
876 if (pos)
877 css_put(&pos->css);
878
879 if (!memcg)
880 iter->generation++;
881 else if (!prev)
882 reclaim->generation = iter->generation;
883 }
884
885 out_unlock:
886 rcu_read_unlock();
887 out:
888 if (prev && prev != root)
889 css_put(&prev->css);
890
891 return memcg;
892 }
893
894 /**
895 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
896 * @root: hierarchy root
897 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
898 */
899 void mem_cgroup_iter_break(struct mem_cgroup *root,
900 struct mem_cgroup *prev)
901 {
902 if (!root)
903 root = root_mem_cgroup;
904 if (prev && prev != root)
905 css_put(&prev->css);
906 }
907
908 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
909 {
910 struct mem_cgroup *memcg = dead_memcg;
911 struct mem_cgroup_reclaim_iter *iter;
912 struct mem_cgroup_per_zone *mz;
913 int nid, zid;
914 int i;
915
916 while ((memcg = parent_mem_cgroup(memcg))) {
917 for_each_node(nid) {
918 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
919 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
920 for (i = 0; i <= DEF_PRIORITY; i++) {
921 iter = &mz->iter[i];
922 cmpxchg(&iter->position,
923 dead_memcg, NULL);
924 }
925 }
926 }
927 }
928 }
929
930 /*
931 * Iteration constructs for visiting all cgroups (under a tree). If
932 * loops are exited prematurely (break), mem_cgroup_iter_break() must
933 * be used for reference counting.
934 */
935 #define for_each_mem_cgroup_tree(iter, root) \
936 for (iter = mem_cgroup_iter(root, NULL, NULL); \
937 iter != NULL; \
938 iter = mem_cgroup_iter(root, iter, NULL))
939
940 #define for_each_mem_cgroup(iter) \
941 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
942 iter != NULL; \
943 iter = mem_cgroup_iter(NULL, iter, NULL))
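/*
 * Usage sketch (illustrative; some_condition() is a stand-in for
 * caller-specific logic): bailing out of the walk early requires
 * mem_cgroup_iter_break() so the reference on the last returned
 * memcg is dropped:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */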
944
945 /**
946 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
947 * @zone: zone of the wanted lruvec
948 * @memcg: memcg of the wanted lruvec
949 *
950 * Returns the lru list vector holding pages for the given @zone and
951 * @mem. This can be the global zone lruvec, if the memory controller
952 * is disabled.
953 */
954 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
955 struct mem_cgroup *memcg)
956 {
957 struct mem_cgroup_per_zone *mz;
958 struct lruvec *lruvec;
959
960 if (mem_cgroup_disabled()) {
961 lruvec = &zone->lruvec;
962 goto out;
963 }
964
965 mz = mem_cgroup_zone_zoneinfo(memcg, zone);
966 lruvec = &mz->lruvec;
967 out:
968 /*
969 * Since a node can be onlined after the mem_cgroup was created,
970 * we have to be prepared to initialize lruvec->zone here;
971 * and if offlined then reonlined, we need to reinitialize it.
972 */
973 if (unlikely(lruvec->zone != zone))
974 lruvec->zone = zone;
975 return lruvec;
976 }
977
978 /**
979 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
980 * @page: the page
981 * @zone: zone of the page
982 *
983 * This function is only safe when following the LRU page isolation
984 * and putback protocol: the LRU lock must be held, and the page must
985 * either be PageLRU() or the caller must have isolated/allocated it.
986 */
987 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
988 {
989 struct mem_cgroup_per_zone *mz;
990 struct mem_cgroup *memcg;
991 struct lruvec *lruvec;
992
993 if (mem_cgroup_disabled()) {
994 lruvec = &zone->lruvec;
995 goto out;
996 }
997
998 memcg = page->mem_cgroup;
999 /*
1000 * Swapcache readahead pages are added to the LRU - and
1001 * possibly migrated - before they are charged.
1002 */
1003 if (!memcg)
1004 memcg = root_mem_cgroup;
1005
1006 mz = mem_cgroup_page_zoneinfo(memcg, page);
1007 lruvec = &mz->lruvec;
1008 out:
1009 /*
1010 * Since a node can be onlined after the mem_cgroup was created,
1011 * we have to be prepared to initialize lruvec->zone here;
1012 * and if offlined then reonlined, we need to reinitialize it.
1013 */
1014 if (unlikely(lruvec->zone != zone))
1015 lruvec->zone = zone;
1016 return lruvec;
1017 }
1018
1019 /**
1020 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1021 * @lruvec: mem_cgroup per zone lru vector
1022 * @lru: index of lru list the page is sitting on
1023 * @nr_pages: positive when adding or negative when removing
1024 *
1025 * This function must be called when a page is added to or removed from an
1026 * lru list.
1027 */
1028 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1029 int nr_pages)
1030 {
1031 struct mem_cgroup_per_zone *mz;
1032 unsigned long *lru_size;
1033
1034 if (mem_cgroup_disabled())
1035 return;
1036
1037 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1038 lru_size = mz->lru_size + lru;
1039 *lru_size += nr_pages;
1040 VM_BUG_ON((long)(*lru_size) < 0);
1041 }
1042
1043 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
1044 {
1045 struct mem_cgroup *task_memcg;
1046 struct task_struct *p;
1047 bool ret;
1048
1049 p = find_lock_task_mm(task);
1050 if (p) {
1051 task_memcg = get_mem_cgroup_from_mm(p->mm);
1052 task_unlock(p);
1053 } else {
1054 /*
1055 * All threads may have already detached their mm's, but the oom
1056 * killer still needs to detect if they have already been oom
1057 * killed to prevent needlessly killing additional tasks.
1058 */
1059 rcu_read_lock();
1060 task_memcg = mem_cgroup_from_task(task);
1061 css_get(&task_memcg->css);
1062 rcu_read_unlock();
1063 }
1064 ret = mem_cgroup_is_descendant(task_memcg, memcg);
1065 css_put(&task_memcg->css);
1066 return ret;
1067 }
1068
1069 /**
1070 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1071 * @memcg: the memory cgroup
1072 *
1073 * Returns the maximum amount of memory @mem can be charged with, in
1074 * pages.
1075 */
1076 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1077 {
1078 unsigned long margin = 0;
1079 unsigned long count;
1080 unsigned long limit;
1081
1082 count = page_counter_read(&memcg->memory);
1083 limit = READ_ONCE(memcg->memory.limit);
1084 if (count < limit)
1085 margin = limit - count;
1086
1087 if (do_memsw_account()) {
1088 count = page_counter_read(&memcg->memsw);
1089 limit = READ_ONCE(memcg->memsw.limit);
1090 if (count <= limit)
1091 margin = min(margin, limit - count);
1092 }
1093
1094 return margin;
1095 }
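/*
 * Example: with memory.limit at 1000 pages and usage at 900, the margin
 * is 100 pages; if memsw accounting is active with a memsw limit of
 * 1200 and memsw usage of 1150, the margin shrinks to min(100, 50) = 50.
 */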
1096
1097 /*
1098 * A routine for checking whether "mem" is under move_account() or not.
1099 *
1100 * Checks whether a cgroup is mc.from or mc.to or under the hierarchy of
1101 * the moving cgroups. This is for waiting at high memory pressure
1102 * caused by "move".
1103 */
1104 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1105 {
1106 struct mem_cgroup *from;
1107 struct mem_cgroup *to;
1108 bool ret = false;
1109 /*
1110 * Unlike task_move routines, we access mc.to, mc.from not under
1111 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1112 */
1113 spin_lock(&mc.lock);
1114 from = mc.from;
1115 to = mc.to;
1116 if (!from)
1117 goto unlock;
1118
1119 ret = mem_cgroup_is_descendant(from, memcg) ||
1120 mem_cgroup_is_descendant(to, memcg);
1121 unlock:
1122 spin_unlock(&mc.lock);
1123 return ret;
1124 }
1125
1126 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1127 {
1128 if (mc.moving_task && current != mc.moving_task) {
1129 if (mem_cgroup_under_move(memcg)) {
1130 DEFINE_WAIT(wait);
1131 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1132 /* moving charge context might have finished. */
1133 if (mc.moving_task)
1134 schedule();
1135 finish_wait(&mc.waitq, &wait);
1136 return true;
1137 }
1138 }
1139 return false;
1140 }
1141
1142 #define K(x) ((x) << (PAGE_SHIFT-10))
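/*
 * Example: with 4K pages (PAGE_SHIFT == 12), K(x) is x << 2, i.e. a
 * page count is converted to kilobytes.
 */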
1143 /**
1144 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1145 * @memcg: The memory cgroup that went over limit
1146 * @p: Task that is going to be killed
1147 *
1148 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1149 * enabled
1150 */
1151 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1152 {
1153 struct mem_cgroup *iter;
1154 unsigned int i;
1155
1156 rcu_read_lock();
1157
1158 if (p) {
1159 pr_info("Task in ");
1160 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1161 pr_cont(" killed as a result of limit of ");
1162 } else {
1163 pr_info("Memory limit reached of cgroup ");
1164 }
1165
1166 pr_cont_cgroup_path(memcg->css.cgroup);
1167 pr_cont("\n");
1168
1169 rcu_read_unlock();
1170
1171 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1172 K((u64)page_counter_read(&memcg->memory)),
1173 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1174 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1175 K((u64)page_counter_read(&memcg->memsw)),
1176 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1177 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1178 K((u64)page_counter_read(&memcg->kmem)),
1179 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1180
1181 for_each_mem_cgroup_tree(iter, memcg) {
1182 pr_info("Memory cgroup stats for ");
1183 pr_cont_cgroup_path(iter->css.cgroup);
1184 pr_cont(":");
1185
1186 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1187 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1188 continue;
1189 pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
1190 K(mem_cgroup_read_stat(iter, i)));
1191 }
1192
1193 for (i = 0; i < NR_LRU_LISTS; i++)
1194 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1195 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1196
1197 pr_cont("\n");
1198 }
1199 }
1200
1201 /*
1202 * This function returns the number of memcgs under the hierarchy tree.
1203 * Returns 1 (self count) if there are no children.
1204 */
1205 static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1206 {
1207 int num = 0;
1208 struct mem_cgroup *iter;
1209
1210 for_each_mem_cgroup_tree(iter, memcg)
1211 num++;
1212 return num;
1213 }
1214
1215 /*
1216 * Return the memory (and swap, if configured) limit for a memcg.
1217 */
1218 static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
1219 {
1220 unsigned long limit;
1221
1222 limit = memcg->memory.limit;
1223 if (mem_cgroup_swappiness(memcg)) {
1224 unsigned long memsw_limit;
1225 unsigned long swap_limit;
1226
1227 memsw_limit = memcg->memsw.limit;
1228 swap_limit = memcg->swap.limit;
1229 swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1230 limit = min(limit + swap_limit, memsw_limit);
1231 }
1232 return limit;
1233 }
1234
1235 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1236 int order)
1237 {
1238 struct oom_control oc = {
1239 .zonelist = NULL,
1240 .nodemask = NULL,
1241 .gfp_mask = gfp_mask,
1242 .order = order,
1243 };
1244 struct mem_cgroup *iter;
1245 unsigned long chosen_points = 0;
1246 unsigned long totalpages;
1247 unsigned int points = 0;
1248 struct task_struct *chosen = NULL;
1249
1250 mutex_lock(&oom_lock);
1251
1252 /*
1253 * If current has a pending SIGKILL or is exiting, then automatically
1254 * select it. The goal is to allow it to allocate so that it may
1255 * quickly exit and free its memory.
1256 */
1257 if (fatal_signal_pending(current) || task_will_free_mem(current)) {
1258 mark_oom_victim(current);
1259 goto unlock;
1260 }
1261
1262 check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
1263 totalpages = mem_cgroup_get_limit(memcg) ? : 1;
1264 for_each_mem_cgroup_tree(iter, memcg) {
1265 struct css_task_iter it;
1266 struct task_struct *task;
1267
1268 css_task_iter_start(&iter->css, &it);
1269 while ((task = css_task_iter_next(&it))) {
1270 switch (oom_scan_process_thread(&oc, task, totalpages)) {
1271 case OOM_SCAN_SELECT:
1272 if (chosen)
1273 put_task_struct(chosen);
1274 chosen = task;
1275 chosen_points = ULONG_MAX;
1276 get_task_struct(chosen);
1277 /* fall through */
1278 case OOM_SCAN_CONTINUE:
1279 continue;
1280 case OOM_SCAN_ABORT:
1281 css_task_iter_end(&it);
1282 mem_cgroup_iter_break(memcg, iter);
1283 if (chosen)
1284 put_task_struct(chosen);
1285 goto unlock;
1286 case OOM_SCAN_OK:
1287 break;
1288 };
1289 points = oom_badness(task, memcg, NULL, totalpages);
1290 if (!points || points < chosen_points)
1291 continue;
1292 /* Prefer thread group leaders for display purposes */
1293 if (points == chosen_points &&
1294 thread_group_leader(chosen))
1295 continue;
1296
1297 if (chosen)
1298 put_task_struct(chosen);
1299 chosen = task;
1300 chosen_points = points;
1301 get_task_struct(chosen);
1302 }
1303 css_task_iter_end(&it);
1304 }
1305
1306 if (chosen) {
1307 points = chosen_points * 1000 / totalpages;
1308 oom_kill_process(&oc, chosen, points, totalpages, memcg,
1309 "Memory cgroup out of memory");
1310 }
1311 unlock:
1312 mutex_unlock(&oom_lock);
1313 return chosen;
1314 }
1315
1316 #if MAX_NUMNODES > 1
1317
1318 /**
1319 * test_mem_cgroup_node_reclaimable
1320 * @memcg: the target memcg
1321 * @nid: the node ID to be checked.
1322 * @noswap: specify true here if the user wants file-only information.
1323 *
1324 * This function returns whether the specified memcg contains any
1325 * reclaimable pages on a node. Returns true if there are any reclaimable
1326 * pages in the node.
1327 */
1328 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1329 int nid, bool noswap)
1330 {
1331 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1332 return true;
1333 if (noswap || !total_swap_pages)
1334 return false;
1335 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1336 return true;
1337 return false;
1338
1339 }
1340
1341 /*
1342 * Always updating the nodemask is not very good - even if we have an empty
1343 * list or the wrong list here, we can start from some node and traverse all
1344 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1345 *
1346 */
1347 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1348 {
1349 int nid;
1350 /*
1351 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1352 * pagein/pageout changes since the last update.
1353 */
1354 if (!atomic_read(&memcg->numainfo_events))
1355 return;
1356 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1357 return;
1358
1359 /* make a nodemask where this memcg uses memory from */
1360 memcg->scan_nodes = node_states[N_MEMORY];
1361
1362 for_each_node_mask(nid, node_states[N_MEMORY]) {
1363
1364 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1365 node_clear(nid, memcg->scan_nodes);
1366 }
1367
1368 atomic_set(&memcg->numainfo_events, 0);
1369 atomic_set(&memcg->numainfo_updating, 0);
1370 }
1371
1372 /*
1373 * Select a node to start reclaim from. Because all we need is to
1374 * reduce the usage counter, starting from anywhere is OK. Considering
1375 * memory reclaim from the current node, there are pros and cons:
1376 *
1377 * Freeing memory from the current node means freeing memory from a node which
1378 * we'll use or have used. So it may make the LRU bad. And if several threads
1379 * hit their limits, they will see contention on a node. But freeing from a remote
1380 * node means more cost for memory reclaim because of memory latency.
1381 *
1382 * For now, we use round-robin. A better algorithm is welcome.
1383 */
1384 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1385 {
1386 int node;
1387
1388 mem_cgroup_may_update_nodemask(memcg);
1389 node = memcg->last_scanned_node;
1390
1391 node = next_node(node, memcg->scan_nodes);
1392 if (node == MAX_NUMNODES)
1393 node = first_node(memcg->scan_nodes);
1394 /*
1395 * We call this when we hit limit, not when pages are added to LRU.
1396 * No LRU may hold pages because all pages are UNEVICTABLE or
1397 * memcg is too small and all pages are not on LRU. In that case,
1398 * we use the current node.
1399 */
1400 if (unlikely(node == MAX_NUMNODES))
1401 node = numa_node_id();
1402
1403 memcg->last_scanned_node = node;
1404 return node;
1405 }
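/*
 * Example of the round-robin selection above: if scan_nodes contains
 * nodes {0, 2, 3} and last_scanned_node is 2, the next victim is node
 * 3; after node 3 the walk wraps back to node 0.
 */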
1406 #else
1407 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1408 {
1409 return 0;
1410 }
1411 #endif
1412
1413 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1414 struct zone *zone,
1415 gfp_t gfp_mask,
1416 unsigned long *total_scanned)
1417 {
1418 struct mem_cgroup *victim = NULL;
1419 int total = 0;
1420 int loop = 0;
1421 unsigned long excess;
1422 unsigned long nr_scanned;
1423 struct mem_cgroup_reclaim_cookie reclaim = {
1424 .zone = zone,
1425 .priority = 0,
1426 };
1427
1428 excess = soft_limit_excess(root_memcg);
1429
1430 while (1) {
1431 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1432 if (!victim) {
1433 loop++;
1434 if (loop >= 2) {
1435 /*
1436 * If we have not been able to reclaim
1437 * anything, it might be because there are
1438 * no reclaimable pages under this hierarchy
1439 */
1440 if (!total)
1441 break;
1442 /*
1443 * We want to do more targeted reclaim.
1444 * excess >> 2 is not too excessive, so that we don't
1445 * reclaim too much, nor too little, so that we don't keep
1446 * coming back to reclaim from this cgroup
1447 */
1448 if (total >= (excess >> 2) ||
1449 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1450 break;
1451 }
1452 continue;
1453 }
1454 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1455 zone, &nr_scanned);
1456 *total_scanned += nr_scanned;
1457 if (!soft_limit_excess(root_memcg))
1458 break;
1459 }
1460 mem_cgroup_iter_break(root_memcg, victim);
1461 return total;
1462 }
1463
1464 #ifdef CONFIG_LOCKDEP
1465 static struct lockdep_map memcg_oom_lock_dep_map = {
1466 .name = "memcg_oom_lock",
1467 };
1468 #endif
1469
1470 static DEFINE_SPINLOCK(memcg_oom_lock);
1471
1472 /*
1473 * Check whether the OOM killer is already running under our hierarchy.
1474 * If someone is running, return false.
1475 */
1476 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1477 {
1478 struct mem_cgroup *iter, *failed = NULL;
1479
1480 spin_lock(&memcg_oom_lock);
1481
1482 for_each_mem_cgroup_tree(iter, memcg) {
1483 if (iter->oom_lock) {
1484 /*
1485 * this subtree of our hierarchy is already locked
1486 * so we cannot give a lock.
1487 */
1488 failed = iter;
1489 mem_cgroup_iter_break(memcg, iter);
1490 break;
1491 } else
1492 iter->oom_lock = true;
1493 }
1494
1495 if (failed) {
1496 /*
1497 * OK, we failed to lock the whole subtree so we have
1498 * to clean up what we set up, up to the failing subtree
1499 */
1500 for_each_mem_cgroup_tree(iter, memcg) {
1501 if (iter == failed) {
1502 mem_cgroup_iter_break(memcg, iter);
1503 break;
1504 }
1505 iter->oom_lock = false;
1506 }
1507 } else
1508 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1509
1510 spin_unlock(&memcg_oom_lock);
1511
1512 return !failed;
1513 }
1514
1515 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1516 {
1517 struct mem_cgroup *iter;
1518
1519 spin_lock(&memcg_oom_lock);
1520 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1521 for_each_mem_cgroup_tree(iter, memcg)
1522 iter->oom_lock = false;
1523 spin_unlock(&memcg_oom_lock);
1524 }
1525
1526 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1527 {
1528 struct mem_cgroup *iter;
1529
1530 spin_lock(&memcg_oom_lock);
1531 for_each_mem_cgroup_tree(iter, memcg)
1532 iter->under_oom++;
1533 spin_unlock(&memcg_oom_lock);
1534 }
1535
1536 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1537 {
1538 struct mem_cgroup *iter;
1539
1540 /*
1541 * When a new child is created while the hierarchy is under oom,
1542 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1543 */
1544 spin_lock(&memcg_oom_lock);
1545 for_each_mem_cgroup_tree(iter, memcg)
1546 if (iter->under_oom > 0)
1547 iter->under_oom--;
1548 spin_unlock(&memcg_oom_lock);
1549 }
1550
1551 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1552
1553 struct oom_wait_info {
1554 struct mem_cgroup *memcg;
1555 wait_queue_t wait;
1556 };
1557
1558 static int memcg_oom_wake_function(wait_queue_t *wait,
1559 unsigned mode, int sync, void *arg)
1560 {
1561 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1562 struct mem_cgroup *oom_wait_memcg;
1563 struct oom_wait_info *oom_wait_info;
1564
1565 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1566 oom_wait_memcg = oom_wait_info->memcg;
1567
1568 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1569 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1570 return 0;
1571 return autoremove_wake_function(wait, mode, sync, arg);
1572 }
1573
1574 static void memcg_oom_recover(struct mem_cgroup *memcg)
1575 {
1576 /*
1577 * For the following lockless ->under_oom test, the only required
1578 * guarantee is that it must see the state asserted by an OOM when
1579 * this function is called as a result of userland actions
1580 * triggered by the notification of the OOM. This is trivially
1581 * achieved by invoking mem_cgroup_mark_under_oom() before
1582 * triggering notification.
1583 */
1584 if (memcg && memcg->under_oom)
1585 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1586 }
1587
1588 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1589 {
1590 if (!current->memcg_may_oom)
1591 return;
1592 /*
1593 * We are in the middle of the charge context here, so we
1594 * don't want to block when potentially sitting on a callstack
1595 * that holds all kinds of filesystem and mm locks.
1596 *
1597 * Also, the caller may handle a failed allocation gracefully
1598 * (like optional page cache readahead) and so an OOM killer
1599 * invocation might not even be necessary.
1600 *
1601 * That's why we don't do anything here except remember the
1602 * OOM context and then deal with it at the end of the page
1603 * fault when the stack is unwound, the locks are released,
1604 * and when we know whether the fault was overall successful.
1605 */
1606 css_get(&memcg->css);
1607 current->memcg_in_oom = memcg;
1608 current->memcg_oom_gfp_mask = mask;
1609 current->memcg_oom_order = order;
1610 }
1611
1612 /**
1613 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1614 * @handle: actually kill/wait or just clean up the OOM state
1615 *
1616 * This has to be called at the end of a page fault if the memcg OOM
1617 * handler was enabled.
1618 *
1619 * Memcg supports userspace OOM handling where failed allocations must
1620 * sleep on a waitqueue until the userspace task resolves the
1621 * situation. Sleeping directly in the charge context with all kinds
1622 * of locks held is not a good idea, instead we remember an OOM state
1623 * in the task and mem_cgroup_oom_synchronize() has to be called at
1624 * the end of the page fault to complete the OOM handling.
1625 *
1626 * Returns %true if an ongoing memcg OOM situation was detected and
1627 * completed, %false otherwise.
1628 */
1629 bool mem_cgroup_oom_synchronize(bool handle)
1630 {
1631 struct mem_cgroup *memcg = current->memcg_in_oom;
1632 struct oom_wait_info owait;
1633 bool locked;
1634
1635 /* OOM is global, do not handle */
1636 if (!memcg)
1637 return false;
1638
1639 if (!handle || oom_killer_disabled)
1640 goto cleanup;
1641
1642 owait.memcg = memcg;
1643 owait.wait.flags = 0;
1644 owait.wait.func = memcg_oom_wake_function;
1645 owait.wait.private = current;
1646 INIT_LIST_HEAD(&owait.wait.task_list);
1647
1648 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1649 mem_cgroup_mark_under_oom(memcg);
1650
1651 locked = mem_cgroup_oom_trylock(memcg);
1652
1653 if (locked)
1654 mem_cgroup_oom_notify(memcg);
1655
1656 if (locked && !memcg->oom_kill_disable) {
1657 mem_cgroup_unmark_under_oom(memcg);
1658 finish_wait(&memcg_oom_waitq, &owait.wait);
1659 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1660 current->memcg_oom_order);
1661 } else {
1662 schedule();
1663 mem_cgroup_unmark_under_oom(memcg);
1664 finish_wait(&memcg_oom_waitq, &owait.wait);
1665 }
1666
1667 if (locked) {
1668 mem_cgroup_oom_unlock(memcg);
1669 /*
1670 * There is no guarantee that an OOM-lock contender
1671 * sees the wakeups triggered by the OOM kill
1672 * uncharges. Wake any sleepers explicitly.
1673 */
1674 memcg_oom_recover(memcg);
1675 }
1676 cleanup:
1677 current->memcg_in_oom = NULL;
1678 css_put(&memcg->css);
1679 return true;
1680 }
1681
1682 /**
1683 * lock_page_memcg - lock a page->mem_cgroup binding
1684 * @page: the page
1685 *
1686 * This function protects unlocked LRU pages from being moved to
1687 * another cgroup and stabilizes their page->mem_cgroup binding.
1688 */
1689 void lock_page_memcg(struct page *page)
1690 {
1691 struct mem_cgroup *memcg;
1692 unsigned long flags;
1693
1694 /*
1695 * The RCU lock is held throughout the transaction. The fast
1696 * path can get away without acquiring the memcg->move_lock
1697 * because page moving starts with an RCU grace period.
1698 */
1699 rcu_read_lock();
1700
1701 if (mem_cgroup_disabled())
1702 return;
1703 again:
1704 memcg = page->mem_cgroup;
1705 if (unlikely(!memcg))
1706 return;
1707
1708 if (atomic_read(&memcg->moving_account) <= 0)
1709 return;
1710
1711 spin_lock_irqsave(&memcg->move_lock, flags);
1712 if (memcg != page->mem_cgroup) {
1713 spin_unlock_irqrestore(&memcg->move_lock, flags);
1714 goto again;
1715 }
1716
1717 /*
1718 * When charge migration first begins, we can have locked and
1719 * unlocked page stat updates happening concurrently. Track
1720 * the task who has the lock for unlock_page_memcg().
1721 */
1722 memcg->move_lock_task = current;
1723 memcg->move_lock_flags = flags;
1724
1725 return;
1726 }
1727 EXPORT_SYMBOL(lock_page_memcg);
1728
1729 /**
1730 * unlock_page_memcg - unlock a page->mem_cgroup binding
1731 * @page: the page
1732 */
1733 void unlock_page_memcg(struct page *page)
1734 {
1735 struct mem_cgroup *memcg = page->mem_cgroup;
1736
1737 if (memcg && memcg->move_lock_task == current) {
1738 unsigned long flags = memcg->move_lock_flags;
1739
1740 memcg->move_lock_task = NULL;
1741 memcg->move_lock_flags = 0;
1742
1743 spin_unlock_irqrestore(&memcg->move_lock, flags);
1744 }
1745
1746 rcu_read_unlock();
1747 }
1748 EXPORT_SYMBOL(unlock_page_memcg);
1749
1750 /*
1751 * size of first charge trial. "32" comes from vmscan.c's magic value.
1752 * TODO: it may be necessary to use bigger numbers on big iron.
1753 */
1754 #define CHARGE_BATCH 32U
1755 struct memcg_stock_pcp {
1756 struct mem_cgroup *cached; /* this is never the root cgroup */
1757 unsigned int nr_pages;
1758 struct work_struct work;
1759 unsigned long flags;
1760 #define FLUSHING_CACHED_CHARGE 0
1761 };
1762 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1763 static DEFINE_MUTEX(percpu_charge_mutex);
1764
1765 /**
1766 * consume_stock: Try to consume stocked charge on this cpu.
1767 * @memcg: memcg to consume from.
1768 * @nr_pages: how many pages to charge.
1769 *
1770 * The charges will only happen if @memcg matches the current cpu's memcg
1771 * stock, and at least @nr_pages are available in that stock. Failure to
1772 * service an allocation will refill the stock.
1773 *
1774 * returns true if successful, false otherwise.
1775 */
1776 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1777 {
1778 struct memcg_stock_pcp *stock;
1779 bool ret = false;
1780
1781 if (nr_pages > CHARGE_BATCH)
1782 return ret;
1783
1784 stock = &get_cpu_var(memcg_stock);
1785 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1786 stock->nr_pages -= nr_pages;
1787 ret = true;
1788 }
1789 put_cpu_var(memcg_stock);
1790 return ret;
1791 }
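/*
 * Example: if this cpu's stock caches 32 pre-charged pages for memcg A,
 * a 4-page charge for A is served from the stock (28 pages remain)
 * without touching the page counters; a charge for a different memcg,
 * or one larger than CHARGE_BATCH, falls through to the slow path.
 */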
1792
1793 /*
1794 * Return the stock cached in the percpu area to the counters and reset the cached information.
1795 */
1796 static void drain_stock(struct memcg_stock_pcp *stock)
1797 {
1798 struct mem_cgroup *old = stock->cached;
1799
1800 if (stock->nr_pages) {
1801 page_counter_uncharge(&old->memory, stock->nr_pages);
1802 if (do_memsw_account())
1803 page_counter_uncharge(&old->memsw, stock->nr_pages);
1804 css_put_many(&old->css, stock->nr_pages);
1805 stock->nr_pages = 0;
1806 }
1807 stock->cached = NULL;
1808 }
1809
1810 /*
1811 * This must be called with preemption disabled, or by a thread which
1812 * is pinned to the local cpu.
1813 */
1814 static void drain_local_stock(struct work_struct *dummy)
1815 {
1816 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
1817 drain_stock(stock);
1818 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1819 }
1820
1821 /*
1822 * Cache charges (nr_pages) to the local per-cpu area.
1823 * This will be consumed by consume_stock() function, later.
1824 */
1825 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1826 {
1827 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1828
1829 if (stock->cached != memcg) { /* reset if necessary */
1830 drain_stock(stock);
1831 stock->cached = memcg;
1832 }
1833 stock->nr_pages += nr_pages;
1834 put_cpu_var(memcg_stock);
1835 }
1836
1837 /*
1838 * Drain all per-CPU charge caches for the given root_memcg, i.e. the
1839 * subtree of the hierarchy under it.
1840 */
1841 static void drain_all_stock(struct mem_cgroup *root_memcg)
1842 {
1843 int cpu, curcpu;
1844
1845 /* If someone's already draining, avoid adding more workers. */
1846 if (!mutex_trylock(&percpu_charge_mutex))
1847 return;
1848 /* Notify other cpus that system-wide "drain" is running */
1849 get_online_cpus();
1850 curcpu = get_cpu();
1851 for_each_online_cpu(cpu) {
1852 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1853 struct mem_cgroup *memcg;
1854
1855 memcg = stock->cached;
1856 if (!memcg || !stock->nr_pages)
1857 continue;
1858 if (!mem_cgroup_is_descendant(memcg, root_memcg))
1859 continue;
1860 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1861 if (cpu == curcpu)
1862 drain_local_stock(&stock->work);
1863 else
1864 schedule_work_on(cpu, &stock->work);
1865 }
1866 }
1867 put_cpu();
1868 put_online_cpus();
1869 mutex_unlock(&percpu_charge_mutex);
1870 }
1871
1872 static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
1873 unsigned long action,
1874 void *hcpu)
1875 {
1876 int cpu = (unsigned long)hcpu;
1877 struct memcg_stock_pcp *stock;
1878
1879 if (action == CPU_ONLINE)
1880 return NOTIFY_OK;
1881
1882 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1883 return NOTIFY_OK;
1884
1885 stock = &per_cpu(memcg_stock, cpu);
1886 drain_stock(stock);
1887 return NOTIFY_OK;
1888 }
1889
1890 static void reclaim_high(struct mem_cgroup *memcg,
1891 unsigned int nr_pages,
1892 gfp_t gfp_mask)
1893 {
1894 do {
1895 if (page_counter_read(&memcg->memory) <= memcg->high)
1896 continue;
1897 mem_cgroup_events(memcg, MEMCG_HIGH, 1);
1898 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1899 } while ((memcg = parent_mem_cgroup(memcg)));
1900 }
1901
1902 static void high_work_func(struct work_struct *work)
1903 {
1904 struct mem_cgroup *memcg;
1905
1906 memcg = container_of(work, struct mem_cgroup, high_work);
1907 reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
1908 }
1909
1910 /*
1911  * Scheduled by try_charge() to be executed from the userland return path;
1912  * it reclaims memory over the high limit.
1913 */
1914 void mem_cgroup_handle_over_high(void)
1915 {
1916 unsigned int nr_pages = current->memcg_nr_pages_over_high;
1917 struct mem_cgroup *memcg;
1918
1919 if (likely(!nr_pages))
1920 return;
1921
1922 memcg = get_mem_cgroup_from_mm(current->mm);
1923 reclaim_high(memcg, nr_pages, GFP_KERNEL);
1924 css_put(&memcg->css);
1925 current->memcg_nr_pages_over_high = 0;
1926 }
1927
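/*
 * Charge @nr_pages to @memcg. The fast path consumes pages from the
 * per-cpu stock; otherwise the memory (and, with legacy memsw
 * accounting, memsw) page counters are charged in CHARGE_BATCH chunks.
 * On failure, the over-limit cgroup is reclaimed from, the stocks are
 * drained and the charge is retried; as a last resort the memcg OOM
 * killer is invoked, or the charge is forced for tasks that are dying
 * or for __GFP_NOFAIL allocations. Returns 0 on success, -ENOMEM
 * otherwise.
 */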
1928 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1929 unsigned int nr_pages)
1930 {
1931 unsigned int batch = max(CHARGE_BATCH, nr_pages);
1932 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1933 struct mem_cgroup *mem_over_limit;
1934 struct page_counter *counter;
1935 unsigned long nr_reclaimed;
1936 bool may_swap = true;
1937 bool drained = false;
1938
1939 if (mem_cgroup_is_root(memcg))
1940 return 0;
1941 retry:
1942 if (consume_stock(memcg, nr_pages))
1943 return 0;
1944
1945 if (!do_memsw_account() ||
1946 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1947 if (page_counter_try_charge(&memcg->memory, batch, &counter))
1948 goto done_restock;
1949 if (do_memsw_account())
1950 page_counter_uncharge(&memcg->memsw, batch);
1951 mem_over_limit = mem_cgroup_from_counter(counter, memory);
1952 } else {
1953 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
1954 may_swap = false;
1955 }
1956
1957 if (batch > nr_pages) {
1958 batch = nr_pages;
1959 goto retry;
1960 }
1961
1962 /*
1963 * Unlike in global OOM situations, memcg is not in a physical
1964 * memory shortage. Allow dying and OOM-killed tasks to
1965 * bypass the last charges so that they can exit quickly and
1966 * free their memory.
1967 */
1968 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
1969 fatal_signal_pending(current) ||
1970 current->flags & PF_EXITING))
1971 goto force;
1972
1973 if (unlikely(task_in_memcg_oom(current)))
1974 goto nomem;
1975
1976 if (!gfpflags_allow_blocking(gfp_mask))
1977 goto nomem;
1978
1979 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
1980
1981 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
1982 gfp_mask, may_swap);
1983
1984 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
1985 goto retry;
1986
1987 if (!drained) {
1988 drain_all_stock(mem_over_limit);
1989 drained = true;
1990 goto retry;
1991 }
1992
1993 if (gfp_mask & __GFP_NORETRY)
1994 goto nomem;
1995 /*
1996 * Even though the limit is exceeded at this point, reclaim
1997 * may have been able to free some pages. Retry the charge
1998 * before killing the task.
1999 *
2000 * Only for regular pages, though: huge pages are rather
2001 * unlikely to succeed so close to the limit, and we fall back
2002 * to regular pages anyway in case of failure.
2003 */
2004 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2005 goto retry;
2006 /*
2007 	 * During task move, charges can be double-counted. So it's
2008 	 * better to wait until the end of task_move if one is in progress.
2009 */
2010 if (mem_cgroup_wait_acct_move(mem_over_limit))
2011 goto retry;
2012
2013 if (nr_retries--)
2014 goto retry;
2015
2016 if (gfp_mask & __GFP_NOFAIL)
2017 goto force;
2018
2019 if (fatal_signal_pending(current))
2020 goto force;
2021
2022 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
2023
2024 mem_cgroup_oom(mem_over_limit, gfp_mask,
2025 get_order(nr_pages * PAGE_SIZE));
2026 nomem:
2027 if (!(gfp_mask & __GFP_NOFAIL))
2028 return -ENOMEM;
2029 force:
2030 /*
2031 * The allocation either can't fail or will lead to more memory
2032 	 * being freed very soon. Allow memory usage to go over the limit
2033 * temporarily by force charging it.
2034 */
2035 page_counter_charge(&memcg->memory, nr_pages);
2036 if (do_memsw_account())
2037 page_counter_charge(&memcg->memsw, nr_pages);
2038 css_get_many(&memcg->css, nr_pages);
2039
2040 return 0;
2041
2042 done_restock:
2043 css_get_many(&memcg->css, batch);
2044 if (batch > nr_pages)
2045 refill_stock(memcg, batch - nr_pages);
2046
2047 /*
2048 * If the hierarchy is above the normal consumption range, schedule
2049 * reclaim on returning to userland. We can perform reclaim here
2050 	 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
2051 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2052 * not recorded as it most likely matches current's and won't
2053 * change in the meantime. As high limit is checked again before
2054 * reclaim, the cost of mismatch is negligible.
2055 */
2056 do {
2057 if (page_counter_read(&memcg->memory) > memcg->high) {
2058 /* Don't bother a random interrupted task */
2059 if (in_interrupt()) {
2060 schedule_work(&memcg->high_work);
2061 break;
2062 }
2063 current->memcg_nr_pages_over_high += batch;
2064 set_notify_resume(current);
2065 break;
2066 }
2067 } while ((memcg = parent_mem_cgroup(memcg)));
2068
2069 return 0;
2070 }
2071
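/*
 * Undo a charge taken by try_charge(): uncharge the page counters and
 * drop the css references that came with it.
 */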
2072 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2073 {
2074 if (mem_cgroup_is_root(memcg))
2075 return;
2076
2077 page_counter_uncharge(&memcg->memory, nr_pages);
2078 if (do_memsw_account())
2079 page_counter_uncharge(&memcg->memsw, nr_pages);
2080
2081 css_put_many(&memcg->css, nr_pages);
2082 }
2083
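/*
 * lock_page_lru() takes zone->lru_lock and, if the page is on an LRU
 * list, isolates it; *isolated records whether unlock_page_lru() has to
 * put the page back on its LRU before dropping the lock.
 */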
2084 static void lock_page_lru(struct page *page, int *isolated)
2085 {
2086 struct zone *zone = page_zone(page);
2087
2088 spin_lock_irq(&zone->lru_lock);
2089 if (PageLRU(page)) {
2090 struct lruvec *lruvec;
2091
2092 lruvec = mem_cgroup_page_lruvec(page, zone);
2093 ClearPageLRU(page);
2094 del_page_from_lru_list(page, lruvec, page_lru(page));
2095 *isolated = 1;
2096 } else
2097 *isolated = 0;
2098 }
2099
2100 static void unlock_page_lru(struct page *page, int isolated)
2101 {
2102 struct zone *zone = page_zone(page);
2103
2104 if (isolated) {
2105 struct lruvec *lruvec;
2106
2107 lruvec = mem_cgroup_page_lruvec(page, zone);
2108 VM_BUG_ON_PAGE(PageLRU(page), page);
2109 SetPageLRU(page);
2110 add_page_to_lru_list(page, lruvec, page_lru(page));
2111 }
2112 spin_unlock_irq(&zone->lru_lock);
2113 }
2114
2115 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2116 bool lrucare)
2117 {
2118 int isolated;
2119
2120 VM_BUG_ON_PAGE(page->mem_cgroup, page);
2121
2122 /*
2123 	 * In some cases, such as SwapCache and FUSE (splice_buf->radixtree), the page
2124 	 * may already be on some other mem_cgroup's LRU. Take care of it.
2125 */
2126 if (lrucare)
2127 lock_page_lru(page, &isolated);
2128
2129 /*
2130 * Nobody should be changing or seriously looking at
2131 * page->mem_cgroup at this point:
2132 *
2133 * - the page is uncharged
2134 *
2135 * - the page is off-LRU
2136 *
2137 * - an anonymous fault has exclusive page access, except for
2138 * a locked page table
2139 *
2140 * - a page cache insertion, a swapin fault, or a migration
2141 * have the page locked
2142 */
2143 page->mem_cgroup = memcg;
2144
2145 if (lrucare)
2146 unlock_page_lru(page, isolated);
2147 }
2148
2149 #ifndef CONFIG_SLOB
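/*
 * Allocate a kmemcg id from memcg_cache_ida. If the id does not fit
 * into the current memcg_caches arrays, grow all kmem caches and
 * list_lrus (roughly doubling, clamped between MEMCG_CACHES_MIN_SIZE
 * and MEMCG_CACHES_MAX_SIZE) before returning it.
 */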
2150 static int memcg_alloc_cache_id(void)
2151 {
2152 int id, size;
2153 int err;
2154
2155 id = ida_simple_get(&memcg_cache_ida,
2156 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2157 if (id < 0)
2158 return id;
2159
2160 if (id < memcg_nr_cache_ids)
2161 return id;
2162
2163 /*
2164 * There's no space for the new id in memcg_caches arrays,
2165 * so we have to grow them.
2166 */
2167 down_write(&memcg_cache_ids_sem);
2168
2169 size = 2 * (id + 1);
2170 if (size < MEMCG_CACHES_MIN_SIZE)
2171 size = MEMCG_CACHES_MIN_SIZE;
2172 else if (size > MEMCG_CACHES_MAX_SIZE)
2173 size = MEMCG_CACHES_MAX_SIZE;
2174
2175 err = memcg_update_all_caches(size);
2176 if (!err)
2177 err = memcg_update_all_list_lrus(size);
2178 if (!err)
2179 memcg_nr_cache_ids = size;
2180
2181 up_write(&memcg_cache_ids_sem);
2182
2183 if (err) {
2184 ida_simple_remove(&memcg_cache_ida, id);
2185 return err;
2186 }
2187 return id;
2188 }
2189
2190 static void memcg_free_cache_id(int id)
2191 {
2192 ida_simple_remove(&memcg_cache_ida, id);
2193 }
2194
2195 struct memcg_kmem_cache_create_work {
2196 struct mem_cgroup *memcg;
2197 struct kmem_cache *cachep;
2198 struct work_struct work;
2199 };
2200
2201 static void memcg_kmem_cache_create_func(struct work_struct *w)
2202 {
2203 struct memcg_kmem_cache_create_work *cw =
2204 container_of(w, struct memcg_kmem_cache_create_work, work);
2205 struct mem_cgroup *memcg = cw->memcg;
2206 struct kmem_cache *cachep = cw->cachep;
2207
2208 memcg_create_kmem_cache(memcg, cachep);
2209
2210 css_put(&memcg->css);
2211 kfree(cw);
2212 }
2213
2214 /*
2215 * Enqueue the creation of a per-memcg kmem_cache.
2216 */
2217 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2218 struct kmem_cache *cachep)
2219 {
2220 struct memcg_kmem_cache_create_work *cw;
2221
2222 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
2223 if (!cw)
2224 return;
2225
2226 css_get(&memcg->css);
2227
2228 cw->memcg = memcg;
2229 cw->cachep = cachep;
2230 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2231
2232 schedule_work(&cw->work);
2233 }
2234
2235 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2236 struct kmem_cache *cachep)
2237 {
2238 /*
2239 * We need to stop accounting when we kmalloc, because if the
2240 * corresponding kmalloc cache is not yet created, the first allocation
2241 * in __memcg_schedule_kmem_cache_create will recurse.
2242 *
2243 * However, it is better to enclose the whole function. Depending on
2244 * the debugging options enabled, INIT_WORK(), for instance, can
2245 	 * trigger an allocation. This, too, will make us recurse. Because at
2246 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2247 * the safest choice is to do it like this, wrapping the whole function.
2248 */
2249 current->memcg_kmem_skip_account = 1;
2250 __memcg_schedule_kmem_cache_create(memcg, cachep);
2251 current->memcg_kmem_skip_account = 0;
2252 }
2253
2254 /*
2255 * Return the kmem_cache we're supposed to use for a slab allocation.
2256 * We try to use the current memcg's version of the cache.
2257 *
2258  * If the cache does not exist yet and we are the first user of it,
2259 * we either create it immediately, if possible, or create it asynchronously
2260 * in a workqueue.
2261 * In the latter case, we will let the current allocation go through with
2262 * the original cache.
2263 *
2264 * Can't be called in interrupt context or from kernel threads.
2265 * This function needs to be called with rcu_read_lock() held.
2266 */
2267 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
2268 {
2269 struct mem_cgroup *memcg;
2270 struct kmem_cache *memcg_cachep;
2271 int kmemcg_id;
2272
2273 VM_BUG_ON(!is_root_cache(cachep));
2274
2275 if (cachep->flags & SLAB_ACCOUNT)
2276 gfp |= __GFP_ACCOUNT;
2277
2278 if (!(gfp & __GFP_ACCOUNT))
2279 return cachep;
2280
2281 if (current->memcg_kmem_skip_account)
2282 return cachep;
2283
2284 memcg = get_mem_cgroup_from_mm(current->mm);
2285 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2286 if (kmemcg_id < 0)
2287 goto out;
2288
2289 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2290 if (likely(memcg_cachep))
2291 return memcg_cachep;
2292
2293 /*
2294 * If we are in a safe context (can wait, and not in interrupt
2295 	 * context), we could be predictable and return right away.
2296 * This would guarantee that the allocation being performed
2297 * already belongs in the new cache.
2298 *
2299 	 * However, there are some clashes that can arise from locking.
2300 * For instance, because we acquire the slab_mutex while doing
2301 * memcg_create_kmem_cache, this means no further allocation
2302 * could happen with the slab_mutex held. So it's better to
2303 * defer everything.
2304 */
2305 memcg_schedule_kmem_cache_create(memcg, cachep);
2306 out:
2307 css_put(&memcg->css);
2308 return cachep;
2309 }
2310
2311 void __memcg_kmem_put_cache(struct kmem_cache *cachep)
2312 {
2313 if (!is_root_cache(cachep))
2314 css_put(&cachep->memcg_params.memcg->css);
2315 }
2316
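/*
 * Charge a kmem page (2^@order pages) to @memcg. On the legacy
 * hierarchy the dedicated kmem counter is charged as well; on success
 * page->mem_cgroup is set to @memcg.
 */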
2317 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2318 struct mem_cgroup *memcg)
2319 {
2320 unsigned int nr_pages = 1 << order;
2321 struct page_counter *counter;
2322 int ret;
2323
2324 ret = try_charge(memcg, gfp, nr_pages);
2325 if (ret)
2326 return ret;
2327
2328 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2329 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2330 cancel_charge(memcg, nr_pages);
2331 return -ENOMEM;
2332 }
2333
2334 page->mem_cgroup = memcg;
2335
2336 return 0;
2337 }
2338
2339 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2340 {
2341 struct mem_cgroup *memcg;
2342 int ret = 0;
2343
2344 memcg = get_mem_cgroup_from_mm(current->mm);
2345 if (!mem_cgroup_is_root(memcg))
2346 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
2347 css_put(&memcg->css);
2348 return ret;
2349 }
2350
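/*
 * Undo __memcg_kmem_charge(): uncharge the counters, clear
 * page->mem_cgroup and drop the css references.
 */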
2351 void __memcg_kmem_uncharge(struct page *page, int order)
2352 {
2353 struct mem_cgroup *memcg = page->mem_cgroup;
2354 unsigned int nr_pages = 1 << order;
2355
2356 if (!memcg)
2357 return;
2358
2359 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2360
2361 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2362 page_counter_uncharge(&memcg->kmem, nr_pages);
2363
2364 page_counter_uncharge(&memcg->memory, nr_pages);
2365 if (do_memsw_account())
2366 page_counter_uncharge(&memcg->memsw, nr_pages);
2367
2368 page->mem_cgroup = NULL;
2369 css_put_many(&memcg->css, nr_pages);
2370 }
2371 #endif /* !CONFIG_SLOB */
2372
2373 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2374
2375 /*
2376  * Because tail pages are not marked as "used", set them. We're under
2377  * zone->lru_lock, and migration entries are set up in all page mappings.
2378 */
2379 void mem_cgroup_split_huge_fixup(struct page *head)
2380 {
2381 int i;
2382
2383 if (mem_cgroup_disabled())
2384 return;
2385
2386 for (i = 1; i < HPAGE_PMD_NR; i++)
2387 head[i].mem_cgroup = head->mem_cgroup;
2388
2389 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
2390 HPAGE_PMD_NR);
2391 }
2392 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2393
2394 #ifdef CONFIG_MEMCG_SWAP
2395 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2396 bool charge)
2397 {
2398 int val = (charge) ? 1 : -1;
2399 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
2400 }
2401
2402 /**
2403 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2404 * @entry: swap entry to be moved
2405 * @from: mem_cgroup which the entry is moved from
2406 * @to: mem_cgroup which the entry is moved to
2407 *
2408 * It succeeds only when the swap_cgroup's record for this entry is the same
2409 * as the mem_cgroup's id of @from.
2410 *
2411 * Returns 0 on success, -EINVAL on failure.
2412 *
2413  * The caller must have charged to @to, IOW, called page_counter_charge() on
2414  * both res and memsw, and called css_get().
2415 */
2416 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2417 struct mem_cgroup *from, struct mem_cgroup *to)
2418 {
2419 unsigned short old_id, new_id;
2420
2421 old_id = mem_cgroup_id(from);
2422 new_id = mem_cgroup_id(to);
2423
2424 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2425 mem_cgroup_swap_statistics(from, false);
2426 mem_cgroup_swap_statistics(to, true);
2427 return 0;
2428 }
2429 return -EINVAL;
2430 }
2431 #else
2432 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2433 struct mem_cgroup *from, struct mem_cgroup *to)
2434 {
2435 return -EINVAL;
2436 }
2437 #endif
2438
2439 static DEFINE_MUTEX(memcg_limit_mutex);
2440
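/*
 * Set a new memory limit. A limit above memsw.limit is refused with
 * -EINVAL; otherwise the loop keeps reclaiming and retrying until the
 * usage fits under the new limit or the retry budget runs out.
 */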
2441 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2442 unsigned long limit)
2443 {
2444 unsigned long curusage;
2445 unsigned long oldusage;
2446 bool enlarge = false;
2447 int retry_count;
2448 int ret;
2449
2450 /*
2451 	 * For keeping hierarchical_reclaim simple, how long we should retry
2452 	 * depends on the caller. We set our retry-count to be a function
2453 	 * of the number of children we should visit in this loop.
2454 */
2455 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2456 mem_cgroup_count_children(memcg);
2457
2458 oldusage = page_counter_read(&memcg->memory);
2459
2460 do {
2461 if (signal_pending(current)) {
2462 ret = -EINTR;
2463 break;
2464 }
2465
2466 mutex_lock(&memcg_limit_mutex);
2467 if (limit > memcg->memsw.limit) {
2468 mutex_unlock(&memcg_limit_mutex);
2469 ret = -EINVAL;
2470 break;
2471 }
2472 if (limit > memcg->memory.limit)
2473 enlarge = true;
2474 ret = page_counter_limit(&memcg->memory, limit);
2475 mutex_unlock(&memcg_limit_mutex);
2476
2477 if (!ret)
2478 break;
2479
2480 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2481
2482 curusage = page_counter_read(&memcg->memory);
2483 		/* Was the usage reduced? */
2484 if (curusage >= oldusage)
2485 retry_count--;
2486 else
2487 oldusage = curusage;
2488 } while (retry_count);
2489
2490 if (!ret && enlarge)
2491 memcg_oom_recover(memcg);
2492
2493 return ret;
2494 }
2495
2496 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2497 unsigned long limit)
2498 {
2499 unsigned long curusage;
2500 unsigned long oldusage;
2501 bool enlarge = false;
2502 int retry_count;
2503 int ret;
2504
2505 	/* see mem_cgroup_resize_limit() */
2506 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2507 mem_cgroup_count_children(memcg);
2508
2509 oldusage = page_counter_read(&memcg->memsw);
2510
2511 do {
2512 if (signal_pending(current)) {
2513 ret = -EINTR;
2514 break;
2515 }
2516
2517 mutex_lock(&memcg_limit_mutex);
2518 if (limit < memcg->memory.limit) {
2519 mutex_unlock(&memcg_limit_mutex);
2520 ret = -EINVAL;
2521 break;
2522 }
2523 if (limit > memcg->memsw.limit)
2524 enlarge = true;
2525 ret = page_counter_limit(&memcg->memsw, limit);
2526 mutex_unlock(&memcg_limit_mutex);
2527
2528 if (!ret)
2529 break;
2530
2531 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2532
2533 curusage = page_counter_read(&memcg->memsw);
2534 		/* Was the usage reduced? */
2535 if (curusage >= oldusage)
2536 retry_count--;
2537 else
2538 oldusage = curusage;
2539 } while (retry_count);
2540
2541 if (!ret && enlarge)
2542 memcg_oom_recover(memcg);
2543
2544 return ret;
2545 }
2546
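/*
 * Reclaim for order-0 allocations from cgroups exceeding their soft
 * limit: repeatedly pick the largest offender from this zone's
 * soft-limit tree, reclaim from it and re-insert it with its updated
 * excess until some progress has been made.
 */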
2547 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2548 gfp_t gfp_mask,
2549 unsigned long *total_scanned)
2550 {
2551 unsigned long nr_reclaimed = 0;
2552 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2553 unsigned long reclaimed;
2554 int loop = 0;
2555 struct mem_cgroup_tree_per_zone *mctz;
2556 unsigned long excess;
2557 unsigned long nr_scanned;
2558
2559 if (order > 0)
2560 return 0;
2561
2562 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2563 /*
2564 	 * This loop can run for a while, especially if mem_cgroups continuously
2565 	 * keep exceeding their soft limit and putting the system under
2566 	 * pressure.
2567 */
2568 do {
2569 if (next_mz)
2570 mz = next_mz;
2571 else
2572 mz = mem_cgroup_largest_soft_limit_node(mctz);
2573 if (!mz)
2574 break;
2575
2576 nr_scanned = 0;
2577 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2578 gfp_mask, &nr_scanned);
2579 nr_reclaimed += reclaimed;
2580 *total_scanned += nr_scanned;
2581 spin_lock_irq(&mctz->lock);
2582 __mem_cgroup_remove_exceeded(mz, mctz);
2583
2584 /*
2585 * If we failed to reclaim anything from this memory cgroup
2586 * it is time to move on to the next cgroup
2587 */
2588 next_mz = NULL;
2589 if (!reclaimed)
2590 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2591
2592 excess = soft_limit_excess(mz->memcg);
2593 /*
2594 * One school of thought says that we should not add
2595 * back the node to the tree if reclaim returns 0.
2596 		 * But our reclaim could return 0 simply because, due
2597 		 * to priority, we are exposing a smaller subset of
2598 		 * memory to reclaim from. Consider this a longer-term
2599 		 * TODO.
2600 */
2601 /* If excess == 0, no tree ops */
2602 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2603 spin_unlock_irq(&mctz->lock);
2604 css_put(&mz->memcg->css);
2605 loop++;
2606 /*
2607 * Could not reclaim anything and there are no more
2608 * mem cgroups to try or we seem to be looping without
2609 * reclaiming anything.
2610 */
2611 if (!nr_reclaimed &&
2612 (next_mz == NULL ||
2613 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2614 break;
2615 } while (!nr_reclaimed);
2616 if (next_mz)
2617 css_put(&next_mz->memcg->css);
2618 return nr_reclaimed;
2619 }
2620
2621 /*
2622 * Test whether @memcg has children, dead or alive. Note that this
2623 * function doesn't care whether @memcg has use_hierarchy enabled and
2624 * returns %true if there are child csses according to the cgroup
2625  * hierarchy. Testing use_hierarchy is the caller's responsibility.
2626 */
2627 static inline bool memcg_has_children(struct mem_cgroup *memcg)
2628 {
2629 bool ret;
2630
2631 rcu_read_lock();
2632 ret = css_next_child(NULL, &memcg->css);
2633 rcu_read_unlock();
2634 return ret;
2635 }
2636
2637 /*
2638 * Reclaims as many pages from the given memcg as possible and moves
2639 * the rest to the parent.
2640 *
2641 * Caller is responsible for holding css reference for memcg.
2642 */
2643 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2644 {
2645 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2646
2647 	/* we call try-to-free pages to make this cgroup empty */
2648 lru_add_drain_all();
2649 /* try to free all pages in this cgroup */
2650 while (nr_retries && page_counter_read(&memcg->memory)) {
2651 int progress;
2652
2653 if (signal_pending(current))
2654 return -EINTR;
2655
2656 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2657 GFP_KERNEL, true);
2658 if (!progress) {
2659 nr_retries--;
2660 /* maybe some writeback is necessary */
2661 congestion_wait(BLK_RW_ASYNC, HZ/10);
2662 }
2663
2664 }
2665
2666 return 0;
2667 }
2668
2669 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2670 char *buf, size_t nbytes,
2671 loff_t off)
2672 {
2673 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2674
2675 if (mem_cgroup_is_root(memcg))
2676 return -EINVAL;
2677 return mem_cgroup_force_empty(memcg) ?: nbytes;
2678 }
2679
2680 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2681 struct cftype *cft)
2682 {
2683 return mem_cgroup_from_css(css)->use_hierarchy;
2684 }
2685
2686 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2687 struct cftype *cft, u64 val)
2688 {
2689 int retval = 0;
2690 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2691 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2692
2693 if (memcg->use_hierarchy == val)
2694 return 0;
2695
2696 /*
2697 * If parent's use_hierarchy is set, we can't make any modifications
2698 * in the child subtrees. If it is unset, then the change can
2699 * occur, provided the current cgroup has no children.
2700 *
2701 	 * For the root cgroup, parent_memcg is NULL, and we allow the value to be
2702 	 * set if there are no children.
2703 */
2704 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2705 (val == 1 || val == 0)) {
2706 if (!memcg_has_children(memcg))
2707 memcg->use_hierarchy = val;
2708 else
2709 retval = -EBUSY;
2710 } else
2711 retval = -EINVAL;
2712
2713 return retval;
2714 }
2715
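/*
 * tree_stat() and tree_events() below sum each per-memcg counter over
 * @memcg and all of its descendants.
 */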
2716 static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
2717 {
2718 struct mem_cgroup *iter;
2719 int i;
2720
2721 memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);
2722
2723 for_each_mem_cgroup_tree(iter, memcg) {
2724 for (i = 0; i < MEMCG_NR_STAT; i++)
2725 stat[i] += mem_cgroup_read_stat(iter, i);
2726 }
2727 }
2728
2729 static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
2730 {
2731 struct mem_cgroup *iter;
2732 int i;
2733
2734 memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);
2735
2736 for_each_mem_cgroup_tree(iter, memcg) {
2737 for (i = 0; i < MEMCG_NR_EVENTS; i++)
2738 events[i] += mem_cgroup_read_events(iter, i);
2739 }
2740 }
2741
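/*
 * For the root cgroup, usage is derived from the per-memcg statistics
 * (cache + rss, plus swap if requested) summed over the whole tree,
 * since charges bypass the root page counters (see try_charge()).
 * All other cgroups read their page counter directly.
 */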
2742 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2743 {
2744 unsigned long val = 0;
2745
2746 if (mem_cgroup_is_root(memcg)) {
2747 struct mem_cgroup *iter;
2748
2749 for_each_mem_cgroup_tree(iter, memcg) {
2750 val += mem_cgroup_read_stat(iter,
2751 MEM_CGROUP_STAT_CACHE);
2752 val += mem_cgroup_read_stat(iter,
2753 MEM_CGROUP_STAT_RSS);
2754 if (swap)
2755 val += mem_cgroup_read_stat(iter,
2756 MEM_CGROUP_STAT_SWAP);
2757 }
2758 } else {
2759 if (!swap)
2760 val = page_counter_read(&memcg->memory);
2761 else
2762 val = page_counter_read(&memcg->memsw);
2763 }
2764 return val;
2765 }
2766
2767 enum {
2768 RES_USAGE,
2769 RES_LIMIT,
2770 RES_MAX_USAGE,
2771 RES_FAILCNT,
2772 RES_SOFT_LIMIT,
2773 };
2774
2775 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2776 struct cftype *cft)
2777 {
2778 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2779 struct page_counter *counter;
2780
2781 switch (MEMFILE_TYPE(cft->private)) {
2782 case _MEM:
2783 counter = &memcg->memory;
2784 break;
2785 case _MEMSWAP:
2786 counter = &memcg->memsw;
2787 break;
2788 case _KMEM:
2789 counter = &memcg->kmem;
2790 break;
2791 case _TCP:
2792 counter = &memcg->tcpmem;
2793 break;
2794 default:
2795 BUG();
2796 }
2797
2798 switch (MEMFILE_ATTR(cft->private)) {
2799 case RES_USAGE:
2800 if (counter == &memcg->memory)
2801 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2802 if (counter == &memcg->memsw)
2803 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2804 return (u64)page_counter_read(counter) * PAGE_SIZE;
2805 case RES_LIMIT:
2806 return (u64)counter->limit * PAGE_SIZE;
2807 case RES_MAX_USAGE:
2808 return (u64)counter->watermark * PAGE_SIZE;
2809 case RES_FAILCNT:
2810 return counter->failcnt;
2811 case RES_SOFT_LIMIT:
2812 return (u64)memcg->soft_limit * PAGE_SIZE;
2813 default:
2814 BUG();
2815 }
2816 }
2817
2818 #ifndef CONFIG_SLOB
2819 static int memcg_online_kmem(struct mem_cgroup *memcg)
2820 {
2821 int memcg_id;
2822
2823 if (cgroup_memory_nokmem)
2824 return 0;
2825
2826 BUG_ON(memcg->kmemcg_id >= 0);
2827 BUG_ON(memcg->kmem_state);
2828
2829 memcg_id = memcg_alloc_cache_id();
2830 if (memcg_id < 0)
2831 return memcg_id;
2832
2833 static_branch_inc(&memcg_kmem_enabled_key);
2834 /*
2835 	 * A memory cgroup is considered kmem-online as soon as it gets a
2836 	 * kmemcg_id. Setting the id after enabling static branching will
2837 * guarantee no one starts accounting before all call sites are
2838 * patched.
2839 */
2840 memcg->kmemcg_id = memcg_id;
2841 memcg->kmem_state = KMEM_ONLINE;
2842
2843 return 0;
2844 }
2845
2846 static void memcg_offline_kmem(struct mem_cgroup *memcg)
2847 {
2848 struct cgroup_subsys_state *css;
2849 struct mem_cgroup *parent, *child;
2850 int kmemcg_id;
2851
2852 if (memcg->kmem_state != KMEM_ONLINE)
2853 return;
2854 /*
2855 * Clear the online state before clearing memcg_caches array
2856 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2857 * guarantees that no cache will be created for this cgroup
2858 * after we are done (see memcg_create_kmem_cache()).
2859 */
2860 memcg->kmem_state = KMEM_ALLOCATED;
2861
2862 memcg_deactivate_kmem_caches(memcg);
2863
2864 kmemcg_id = memcg->kmemcg_id;
2865 BUG_ON(kmemcg_id < 0);
2866
2867 parent = parent_mem_cgroup(memcg);
2868 if (!parent)
2869 parent = root_mem_cgroup;
2870
2871 /*
2872 * Change kmemcg_id of this cgroup and all its descendants to the
2873 * parent's id, and then move all entries from this cgroup's list_lrus
2874 * to ones of the parent. After we have finished, all list_lrus
2875 * corresponding to this cgroup are guaranteed to remain empty. The
2876 * ordering is imposed by list_lru_node->lock taken by
2877 * memcg_drain_all_list_lrus().
2878 */
2879 css_for_each_descendant_pre(css, &memcg->css) {
2880 child = mem_cgroup_from_css(css);
2881 BUG_ON(child->kmemcg_id != kmemcg_id);
2882 child->kmemcg_id = parent->kmemcg_id;
2883 if (!memcg->use_hierarchy)
2884 break;
2885 }
2886 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2887
2888 memcg_free_cache_id(kmemcg_id);
2889 }
2890
2891 static void memcg_free_kmem(struct mem_cgroup *memcg)
2892 {
2893 /* css_alloc() failed, offlining didn't happen */
2894 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2895 memcg_offline_kmem(memcg);
2896
2897 if (memcg->kmem_state == KMEM_ALLOCATED) {
2898 memcg_destroy_kmem_caches(memcg);
2899 static_branch_dec(&memcg_kmem_enabled_key);
2900 WARN_ON(page_counter_read(&memcg->kmem));
2901 }
2902 }
2903 #else
2904 static int memcg_online_kmem(struct mem_cgroup *memcg)
2905 {
2906 return 0;
2907 }
2908 static void memcg_offline_kmem(struct mem_cgroup *memcg)
2909 {
2910 }
2911 static void memcg_free_kmem(struct mem_cgroup *memcg)
2912 {
2913 }
2914 #endif /* !CONFIG_SLOB */
2915
2916 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2917 unsigned long limit)
2918 {
2919 int ret;
2920
2921 mutex_lock(&memcg_limit_mutex);
2922 ret = page_counter_limit(&memcg->kmem, limit);
2923 mutex_unlock(&memcg_limit_mutex);
2924 return ret;
2925 }
2926
2927 static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2928 {
2929 int ret;
2930
2931 mutex_lock(&memcg_limit_mutex);
2932
2933 ret = page_counter_limit(&memcg->tcpmem, limit);
2934 if (ret)
2935 goto out;
2936
2937 if (!memcg->tcpmem_active) {
2938 /*
2939 * The active flag needs to be written after the static_key
2940 * update. This is what guarantees that the socket activation
2941 * function is the last one to run. See sock_update_memcg() for
2942 * details, and note that we don't mark any socket as belonging
2943 * to this memcg until that flag is up.
2944 *
2945 * We need to do this, because static_keys will span multiple
2946 * sites, but we can't control their order. If we mark a socket
2947 * as accounted, but the accounting functions are not patched in
2948 * yet, we'll lose accounting.
2949 *
2950 * We never race with the readers in sock_update_memcg(),
2951 		 * because when this value changes, the code to process it is not
2952 * patched in yet.
2953 */
2954 static_branch_inc(&memcg_sockets_enabled_key);
2955 memcg->tcpmem_active = true;
2956 }
2957 out:
2958 mutex_unlock(&memcg_limit_mutex);
2959 return ret;
2960 }
2961
2962 /*
2963 * The user of this function is...
2964 * RES_LIMIT.
2965 */
2966 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2967 char *buf, size_t nbytes, loff_t off)
2968 {
2969 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2970 unsigned long nr_pages;
2971 int ret;
2972
2973 buf = strstrip(buf);
2974 ret = page_counter_memparse(buf, "-1", &nr_pages);
2975 if (ret)
2976 return ret;
2977
2978 switch (MEMFILE_ATTR(of_cft(of)->private)) {
2979 case RES_LIMIT:
2980 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
2981 ret = -EINVAL;
2982 break;
2983 }
2984 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2985 case _MEM:
2986 ret = mem_cgroup_resize_limit(memcg, nr_pages);
2987 break;
2988 case _MEMSWAP:
2989 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
2990 break;
2991 case _KMEM:
2992 ret = memcg_update_kmem_limit(memcg, nr_pages);
2993 break;
2994 case _TCP:
2995 ret = memcg_update_tcp_limit(memcg, nr_pages);
2996 break;
2997 }
2998 break;
2999 case RES_SOFT_LIMIT:
3000 memcg->soft_limit = nr_pages;
3001 ret = 0;
3002 break;
3003 }
3004 return ret ?: nbytes;
3005 }
3006
3007 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3008 size_t nbytes, loff_t off)
3009 {
3010 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3011 struct page_counter *counter;
3012
3013 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3014 case _MEM:
3015 counter = &memcg->memory;
3016 break;
3017 case _MEMSWAP:
3018 counter = &memcg->memsw;
3019 break;
3020 case _KMEM:
3021 counter = &memcg->kmem;
3022 break;
3023 case _TCP:
3024 counter = &memcg->tcpmem;
3025 break;
3026 default:
3027 BUG();
3028 }
3029
3030 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3031 case RES_MAX_USAGE:
3032 page_counter_reset_watermark(counter);
3033 break;
3034 case RES_FAILCNT:
3035 counter->failcnt = 0;
3036 break;
3037 default:
3038 BUG();
3039 }
3040
3041 return nbytes;
3042 }
3043
3044 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3045 struct cftype *cft)
3046 {
3047 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3048 }
3049
3050 #ifdef CONFIG_MMU
3051 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3052 struct cftype *cft, u64 val)
3053 {
3054 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3055
3056 if (val & ~MOVE_MASK)
3057 return -EINVAL;
3058
3059 /*
3060 	 * No locking is needed here, because ->can_attach() will check this
3061 	 * value once at the beginning of the process, and then carry
3062 * on with stale data. This means that changes to this value will only
3063 * affect task migrations starting after the change.
3064 */
3065 memcg->move_charge_at_immigrate = val;
3066 return 0;
3067 }
3068 #else
3069 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3070 struct cftype *cft, u64 val)
3071 {
3072 return -ENOSYS;
3073 }
3074 #endif
3075
3076 #ifdef CONFIG_NUMA
3077 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3078 {
3079 struct numa_stat {
3080 const char *name;
3081 unsigned int lru_mask;
3082 };
3083
3084 static const struct numa_stat stats[] = {
3085 { "total", LRU_ALL },
3086 { "file", LRU_ALL_FILE },
3087 { "anon", LRU_ALL_ANON },
3088 { "unevictable", BIT(LRU_UNEVICTABLE) },
3089 };
3090 const struct numa_stat *stat;
3091 int nid;
3092 unsigned long nr;
3093 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3094
3095 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3096 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3097 seq_printf(m, "%s=%lu", stat->name, nr);
3098 for_each_node_state(nid, N_MEMORY) {
3099 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3100 stat->lru_mask);
3101 seq_printf(m, " N%d=%lu", nid, nr);
3102 }
3103 seq_putc(m, '\n');
3104 }
3105
3106 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3107 struct mem_cgroup *iter;
3108
3109 nr = 0;
3110 for_each_mem_cgroup_tree(iter, memcg)
3111 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3112 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3113 for_each_node_state(nid, N_MEMORY) {
3114 nr = 0;
3115 for_each_mem_cgroup_tree(iter, memcg)
3116 nr += mem_cgroup_node_nr_lru_pages(
3117 iter, nid, stat->lru_mask);
3118 seq_printf(m, " N%d=%lu", nid, nr);
3119 }
3120 seq_putc(m, '\n');
3121 }
3122
3123 return 0;
3124 }
3125 #endif /* CONFIG_NUMA */
3126
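/*
 * memory.stat: local counters and events, per-LRU page counts,
 * hierarchical limits, total_* sums over the subtree and, with
 * CONFIG_DEBUG_VM, the recent_rotated/recent_scanned reclaim stats.
 */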
3127 static int memcg_stat_show(struct seq_file *m, void *v)
3128 {
3129 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3130 unsigned long memory, memsw;
3131 struct mem_cgroup *mi;
3132 unsigned int i;
3133
3134 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3135 MEM_CGROUP_STAT_NSTATS);
3136 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3137 MEM_CGROUP_EVENTS_NSTATS);
3138 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3139
3140 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3141 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3142 continue;
3143 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3144 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3145 }
3146
3147 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3148 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3149 mem_cgroup_read_events(memcg, i));
3150
3151 for (i = 0; i < NR_LRU_LISTS; i++)
3152 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3153 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3154
3155 /* Hierarchical information */
3156 memory = memsw = PAGE_COUNTER_MAX;
3157 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3158 memory = min(memory, mi->memory.limit);
3159 memsw = min(memsw, mi->memsw.limit);
3160 }
3161 seq_printf(m, "hierarchical_memory_limit %llu\n",
3162 (u64)memory * PAGE_SIZE);
3163 if (do_memsw_account())
3164 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3165 (u64)memsw * PAGE_SIZE);
3166
3167 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3168 unsigned long long val = 0;
3169
3170 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3171 continue;
3172 for_each_mem_cgroup_tree(mi, memcg)
3173 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3174 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3175 }
3176
3177 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3178 unsigned long long val = 0;
3179
3180 for_each_mem_cgroup_tree(mi, memcg)
3181 val += mem_cgroup_read_events(mi, i);
3182 seq_printf(m, "total_%s %llu\n",
3183 mem_cgroup_events_names[i], val);
3184 }
3185
3186 for (i = 0; i < NR_LRU_LISTS; i++) {
3187 unsigned long long val = 0;
3188
3189 for_each_mem_cgroup_tree(mi, memcg)
3190 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3191 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3192 }
3193
3194 #ifdef CONFIG_DEBUG_VM
3195 {
3196 int nid, zid;
3197 struct mem_cgroup_per_zone *mz;
3198 struct zone_reclaim_stat *rstat;
3199 unsigned long recent_rotated[2] = {0, 0};
3200 unsigned long recent_scanned[2] = {0, 0};
3201
3202 for_each_online_node(nid)
3203 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3204 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
3205 rstat = &mz->lruvec.reclaim_stat;
3206
3207 recent_rotated[0] += rstat->recent_rotated[0];
3208 recent_rotated[1] += rstat->recent_rotated[1];
3209 recent_scanned[0] += rstat->recent_scanned[0];
3210 recent_scanned[1] += rstat->recent_scanned[1];
3211 }
3212 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3213 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3214 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3215 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3216 }
3217 #endif
3218
3219 return 0;
3220 }
3221
3222 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3223 struct cftype *cft)
3224 {
3225 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3226
3227 return mem_cgroup_swappiness(memcg);
3228 }
3229
3230 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3231 struct cftype *cft, u64 val)
3232 {
3233 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3234
3235 if (val > 100)
3236 return -EINVAL;
3237
3238 if (css->parent)
3239 memcg->swappiness = val;
3240 else
3241 vm_swappiness = val;
3242
3243 return 0;
3244 }
3245
3246 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3247 {
3248 struct mem_cgroup_threshold_ary *t;
3249 unsigned long usage;
3250 int i;
3251
3252 rcu_read_lock();
3253 if (!swap)
3254 t = rcu_dereference(memcg->thresholds.primary);
3255 else
3256 t = rcu_dereference(memcg->memsw_thresholds.primary);
3257
3258 if (!t)
3259 goto unlock;
3260
3261 usage = mem_cgroup_usage(memcg, swap);
3262
3263 /*
3264 	 * current_threshold points to the threshold just below or equal to usage.
3265 	 * If that is not the case, a threshold was crossed after the last
3266 	 * call of __mem_cgroup_threshold().
3267 */
3268 i = t->current_threshold;
3269
3270 /*
3271 	 * Iterate backward over the array of thresholds starting from
3272 	 * current_threshold and check if a threshold is crossed.
3273 	 * If none of the thresholds below usage is crossed, we read
3274 * only one element of the array here.
3275 */
3276 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3277 eventfd_signal(t->entries[i].eventfd, 1);
3278
3279 /* i = current_threshold + 1 */
3280 i++;
3281
3282 /*
3283 	 * Iterate forward over the array of thresholds starting from
3284 	 * current_threshold+1 and check if a threshold is crossed.
3285 	 * If none of the thresholds above usage is crossed, we read
3286 * only one element of the array here.
3287 */
3288 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3289 eventfd_signal(t->entries[i].eventfd, 1);
3290
3291 /* Update current_threshold */
3292 t->current_threshold = i - 1;
3293 unlock:
3294 rcu_read_unlock();
3295 }
3296
3297 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3298 {
3299 while (memcg) {
3300 __mem_cgroup_threshold(memcg, false);
3301 if (do_memsw_account())
3302 __mem_cgroup_threshold(memcg, true);
3303
3304 memcg = parent_mem_cgroup(memcg);
3305 }
3306 }
3307
3308 static int compare_thresholds(const void *a, const void *b)
3309 {
3310 const struct mem_cgroup_threshold *_a = a;
3311 const struct mem_cgroup_threshold *_b = b;
3312
3313 if (_a->threshold > _b->threshold)
3314 return 1;
3315
3316 if (_a->threshold < _b->threshold)
3317 return -1;
3318
3319 return 0;
3320 }
3321
3322 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3323 {
3324 struct mem_cgroup_eventfd_list *ev;
3325
3326 spin_lock(&memcg_oom_lock);
3327
3328 list_for_each_entry(ev, &memcg->oom_notify, list)
3329 eventfd_signal(ev->eventfd, 1);
3330
3331 spin_unlock(&memcg_oom_lock);
3332 return 0;
3333 }
3334
3335 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3336 {
3337 struct mem_cgroup *iter;
3338
3339 for_each_mem_cgroup_tree(iter, memcg)
3340 mem_cgroup_oom_notify_cb(iter);
3341 }
3342
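/*
 * Register a usage threshold: allocate a new array one entry larger
 * than the current one, copy the old entries, add the new threshold,
 * sort, and publish the result with rcu_assign_pointer() while keeping
 * the old array as the spare buffer.
 */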
3343 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3344 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3345 {
3346 struct mem_cgroup_thresholds *thresholds;
3347 struct mem_cgroup_threshold_ary *new;
3348 unsigned long threshold;
3349 unsigned long usage;
3350 int i, size, ret;
3351
3352 ret = page_counter_memparse(args, "-1", &threshold);
3353 if (ret)
3354 return ret;
3355
3356 mutex_lock(&memcg->thresholds_lock);
3357
3358 if (type == _MEM) {
3359 thresholds = &memcg->thresholds;
3360 usage = mem_cgroup_usage(memcg, false);
3361 } else if (type == _MEMSWAP) {
3362 thresholds = &memcg->memsw_thresholds;
3363 usage = mem_cgroup_usage(memcg, true);
3364 } else
3365 BUG();
3366
3367 	/* Check if a threshold was crossed before adding a new one */
3368 if (thresholds->primary)
3369 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3370
3371 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3372
3373 /* Allocate memory for new array of thresholds */
3374 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3375 GFP_KERNEL);
3376 if (!new) {
3377 ret = -ENOMEM;
3378 goto unlock;
3379 }
3380 new->size = size;
3381
3382 /* Copy thresholds (if any) to new array */
3383 if (thresholds->primary) {
3384 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3385 sizeof(struct mem_cgroup_threshold));
3386 }
3387
3388 /* Add new threshold */
3389 new->entries[size - 1].eventfd = eventfd;
3390 new->entries[size - 1].threshold = threshold;
3391
3392 	/* Sort thresholds. Registering a new threshold isn't time-critical */
3393 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3394 compare_thresholds, NULL);
3395
3396 /* Find current threshold */
3397 new->current_threshold = -1;
3398 for (i = 0; i < size; i++) {
3399 if (new->entries[i].threshold <= usage) {
3400 /*
3401 * new->current_threshold will not be used until
3402 * rcu_assign_pointer(), so it's safe to increment
3403 * it here.
3404 */
3405 ++new->current_threshold;
3406 } else
3407 break;
3408 }
3409
3410 /* Free old spare buffer and save old primary buffer as spare */
3411 kfree(thresholds->spare);
3412 thresholds->spare = thresholds->primary;
3413
3414 rcu_assign_pointer(thresholds->primary, new);
3415
3416 /* To be sure that nobody uses thresholds */
3417 synchronize_rcu();
3418
3419 unlock:
3420 mutex_unlock(&memcg->thresholds_lock);
3421
3422 return ret;
3423 }
3424
3425 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3426 struct eventfd_ctx *eventfd, const char *args)
3427 {
3428 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3429 }
3430
3431 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3432 struct eventfd_ctx *eventfd, const char *args)
3433 {
3434 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3435 }
3436
3437 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3438 struct eventfd_ctx *eventfd, enum res_type type)
3439 {
3440 struct mem_cgroup_thresholds *thresholds;
3441 struct mem_cgroup_threshold_ary *new;
3442 unsigned long usage;
3443 int i, j, size;
3444
3445 mutex_lock(&memcg->thresholds_lock);
3446
3447 if (type == _MEM) {
3448 thresholds = &memcg->thresholds;
3449 usage = mem_cgroup_usage(memcg, false);
3450 } else if (type == _MEMSWAP) {
3451 thresholds = &memcg->memsw_thresholds;
3452 usage = mem_cgroup_usage(memcg, true);
3453 } else
3454 BUG();
3455
3456 if (!thresholds->primary)
3457 goto unlock;
3458
3459 	/* Check if a threshold was crossed before removing */
3460 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3461
3462 	/* Calculate the new number of thresholds */
3463 size = 0;
3464 for (i = 0; i < thresholds->primary->size; i++) {
3465 if (thresholds->primary->entries[i].eventfd != eventfd)
3466 size++;
3467 }
3468
3469 new = thresholds->spare;
3470
3471 /* Set thresholds array to NULL if we don't have thresholds */
3472 if (!size) {
3473 kfree(new);
3474 new = NULL;
3475 goto swap_buffers;
3476 }
3477
3478 new->size = size;
3479
3480 /* Copy thresholds and find current threshold */
3481 new->current_threshold = -1;
3482 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3483 if (thresholds->primary->entries[i].eventfd == eventfd)
3484 continue;
3485
3486 new->entries[j] = thresholds->primary->entries[i];
3487 if (new->entries[j].threshold <= usage) {
3488 /*
3489 * new->current_threshold will not be used
3490 * until rcu_assign_pointer(), so it's safe to increment
3491 * it here.
3492 */
3493 ++new->current_threshold;
3494 }
3495 j++;
3496 }
3497
3498 swap_buffers:
3499 /* Swap primary and spare array */
3500 thresholds->spare = thresholds->primary;
3501
3502 rcu_assign_pointer(thresholds->primary, new);
3503
3504 /* To be sure that nobody uses thresholds */
3505 synchronize_rcu();
3506
3507 /* If all events are unregistered, free the spare array */
3508 if (!new) {
3509 kfree(thresholds->spare);
3510 thresholds->spare = NULL;
3511 }
3512 unlock:
3513 mutex_unlock(&memcg->thresholds_lock);
3514 }
3515
3516 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3517 struct eventfd_ctx *eventfd)
3518 {
3519 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3520 }
3521
3522 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3523 struct eventfd_ctx *eventfd)
3524 {
3525 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3526 }
3527
3528 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3529 struct eventfd_ctx *eventfd, const char *args)
3530 {
3531 struct mem_cgroup_eventfd_list *event;
3532
3533 event = kmalloc(sizeof(*event), GFP_KERNEL);
3534 if (!event)
3535 return -ENOMEM;
3536
3537 spin_lock(&memcg_oom_lock);
3538
3539 event->eventfd = eventfd;
3540 list_add(&event->list, &memcg->oom_notify);
3541
3542 	/* already in OOM? */
3543 if (memcg->under_oom)
3544 eventfd_signal(eventfd, 1);
3545 spin_unlock(&memcg_oom_lock);
3546
3547 return 0;
3548 }
3549
3550 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3551 struct eventfd_ctx *eventfd)
3552 {
3553 struct mem_cgroup_eventfd_list *ev, *tmp;
3554
3555 spin_lock(&memcg_oom_lock);
3556
3557 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3558 if (ev->eventfd == eventfd) {
3559 list_del(&ev->list);
3560 kfree(ev);
3561 }
3562 }
3563
3564 spin_unlock(&memcg_oom_lock);
3565 }
3566
3567 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3568 {
3569 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3570
3571 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3572 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3573 return 0;
3574 }
3575
3576 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3577 struct cftype *cft, u64 val)
3578 {
3579 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3580
3581 	/* cannot be set for the root cgroup, and only 0 and 1 are allowed */
3582 if (!css->parent || !((val == 0) || (val == 1)))
3583 return -EINVAL;
3584
3585 memcg->oom_kill_disable = val;
3586 if (!val)
3587 memcg_oom_recover(memcg);
3588
3589 return 0;
3590 }
3591
3592 #ifdef CONFIG_CGROUP_WRITEBACK
3593
3594 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3595 {
3596 return &memcg->cgwb_list;
3597 }
3598
3599 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3600 {
3601 return wb_domain_init(&memcg->cgwb_domain, gfp);
3602 }
3603
3604 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3605 {
3606 wb_domain_exit(&memcg->cgwb_domain);
3607 }
3608
3609 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3610 {
3611 wb_domain_size_changed(&memcg->cgwb_domain);
3612 }
3613
3614 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3615 {
3616 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3617
3618 if (!memcg->css.parent)
3619 return NULL;
3620
3621 return &memcg->cgwb_domain;
3622 }
3623
3624 /**
3625 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3626 * @wb: bdi_writeback in question
3627 * @pfilepages: out parameter for number of file pages
3628 * @pheadroom: out parameter for number of allocatable pages according to memcg
3629 * @pdirty: out parameter for number of dirty pages
3630 * @pwriteback: out parameter for number of pages under writeback
3631 *
3632 * Determine the numbers of file, headroom, dirty, and writeback pages in
3633 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3634 * is a bit more involved.
3635 *
3636 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3637 * headroom is calculated as the lowest headroom of itself and the
3638 * ancestors. Note that this doesn't consider the actual amount of
3639 * available memory in the system. The caller should further cap
3640 * *@pheadroom accordingly.
3641 */
3642 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3643 unsigned long *pheadroom, unsigned long *pdirty,
3644 unsigned long *pwriteback)
3645 {
3646 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3647 struct mem_cgroup *parent;
3648
3649 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3650
3651 /* this should eventually include NR_UNSTABLE_NFS */
3652 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3653 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3654 (1 << LRU_ACTIVE_FILE));
3655 *pheadroom = PAGE_COUNTER_MAX;
3656
3657 while ((parent = parent_mem_cgroup(memcg))) {
3658 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3659 unsigned long used = page_counter_read(&memcg->memory);
3660
3661 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3662 memcg = parent;
3663 }
3664 }
3665
3666 #else /* CONFIG_CGROUP_WRITEBACK */
3667
3668 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3669 {
3670 return 0;
3671 }
3672
3673 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3674 {
3675 }
3676
3677 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3678 {
3679 }
3680
3681 #endif /* CONFIG_CGROUP_WRITEBACK */
3682
3683 /*
3684 * DO NOT USE IN NEW FILES.
3685 *
3686 * "cgroup.event_control" implementation.
3687 *
3688 * This is way over-engineered. It tries to support fully configurable
3689  * events for each user. Such a level of flexibility is completely
3690  * unnecessary, especially in light of the planned unified hierarchy.
3691 *
3692 * Please deprecate this and replace with something simpler if at all
3693 * possible.
3694 */
3695
3696 /*
3697 * Unregister event and free resources.
3698 *
3699 * Gets called from workqueue.
3700 */
3701 static void memcg_event_remove(struct work_struct *work)
3702 {
3703 struct mem_cgroup_event *event =
3704 container_of(work, struct mem_cgroup_event, remove);
3705 struct mem_cgroup *memcg = event->memcg;
3706
3707 remove_wait_queue(event->wqh, &event->wait);
3708
3709 event->unregister_event(memcg, event->eventfd);
3710
3711 /* Notify userspace the event is going away. */
3712 eventfd_signal(event->eventfd, 1);
3713
3714 eventfd_ctx_put(event->eventfd);
3715 kfree(event);
3716 css_put(&memcg->css);
3717 }
3718
3719 /*
3720 * Gets called on POLLHUP on eventfd when user closes it.
3721 *
3722 * Called with wqh->lock held and interrupts disabled.
3723 */
3724 static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3725 int sync, void *key)
3726 {
3727 struct mem_cgroup_event *event =
3728 container_of(wait, struct mem_cgroup_event, wait);
3729 struct mem_cgroup *memcg = event->memcg;
3730 unsigned long flags = (unsigned long)key;
3731
3732 if (flags & POLLHUP) {
3733 /*
3734 * If the event has been detached at cgroup removal, we
3735 * can simply return knowing the other side will cleanup
3736 * for us.
3737 *
3738 * We can't race against event freeing since the other
3739 * side will require wqh->lock via remove_wait_queue(),
3740 * which we hold.
3741 */
3742 spin_lock(&memcg->event_list_lock);
3743 if (!list_empty(&event->list)) {
3744 list_del_init(&event->list);
3745 /*
3746 			 * We are in atomic context, but memcg_event_remove()
3747 			 * may sleep, so we have to call it from a workqueue.
3748 */
3749 schedule_work(&event->remove);
3750 }
3751 spin_unlock(&memcg->event_list_lock);
3752 }
3753
3754 return 0;
3755 }
3756
3757 static void memcg_event_ptable_queue_proc(struct file *file,
3758 wait_queue_head_t *wqh, poll_table *pt)
3759 {
3760 struct mem_cgroup_event *event =
3761 container_of(pt, struct mem_cgroup_event, pt);
3762
3763 event->wqh = wqh;
3764 add_wait_queue(wqh, &event->wait);
3765 }
3766
3767 /*
3768 * DO NOT USE IN NEW FILES.
3769 *
3770 * Parse input and register new cgroup event handler.
3771 *
3772 * Input must be in format '<event_fd> <control_fd> <args>'.
3773 * Interpretation of args is defined by control file implementation.
3774 */
3775 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3776 char *buf, size_t nbytes, loff_t off)
3777 {
3778 struct cgroup_subsys_state *css = of_css(of);
3779 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3780 struct mem_cgroup_event *event;
3781 struct cgroup_subsys_state *cfile_css;
3782 unsigned int efd, cfd;
3783 struct fd efile;
3784 struct fd cfile;
3785 const char *name;
3786 char *endp;
3787 int ret;
3788
3789 buf = strstrip(buf);
3790
3791 efd = simple_strtoul(buf, &endp, 10);
3792 if (*endp != ' ')
3793 return -EINVAL;
3794 buf = endp + 1;
3795
3796 cfd = simple_strtoul(buf, &endp, 10);
3797 if ((*endp != ' ') && (*endp != '\0'))
3798 return -EINVAL;
3799 buf = endp + 1;
3800
3801 event = kzalloc(sizeof(*event), GFP_KERNEL);
3802 if (!event)
3803 return -ENOMEM;
3804
3805 event->memcg = memcg;
3806 INIT_LIST_HEAD(&event->list);
3807 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3808 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3809 INIT_WORK(&event->remove, memcg_event_remove);
3810
3811 efile = fdget(efd);
3812 if (!efile.file) {
3813 ret = -EBADF;
3814 goto out_kfree;
3815 }
3816
3817 event->eventfd = eventfd_ctx_fileget(efile.file);
3818 if (IS_ERR(event->eventfd)) {
3819 ret = PTR_ERR(event->eventfd);
3820 goto out_put_efile;
3821 }
3822
3823 cfile = fdget(cfd);
3824 if (!cfile.file) {
3825 ret = -EBADF;
3826 goto out_put_eventfd;
3827 }
3828
3829 	/* the process needs read permission on the control file */
3830 /* AV: shouldn't we check that it's been opened for read instead? */
3831 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3832 if (ret < 0)
3833 goto out_put_cfile;
3834
3835 /*
3836 * Determine the event callbacks and set them in @event. This used
3837 * to be done via struct cftype but cgroup core no longer knows
3838 * about these events. The following is crude but the whole thing
3839 * is for compatibility anyway.
3840 *
3841 * DO NOT ADD NEW FILES.
3842 */
3843 name = cfile.file->f_path.dentry->d_name.name;
3844
3845 if (!strcmp(name, "memory.usage_in_bytes")) {
3846 event->register_event = mem_cgroup_usage_register_event;
3847 event->unregister_event = mem_cgroup_usage_unregister_event;
3848 } else if (!strcmp(name, "memory.oom_control")) {
3849 event->register_event = mem_cgroup_oom_register_event;
3850 event->unregister_event = mem_cgroup_oom_unregister_event;
3851 } else if (!strcmp(name, "memory.pressure_level")) {
3852 event->register_event = vmpressure_register_event;
3853 event->unregister_event = vmpressure_unregister_event;
3854 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3855 event->register_event = memsw_cgroup_usage_register_event;
3856 event->unregister_event = memsw_cgroup_usage_unregister_event;
3857 } else {
3858 ret = -EINVAL;
3859 goto out_put_cfile;
3860 }
3861
3862 /*
3863	 * Verify that @cfile belongs to @css. Also, remaining events are
3864 * automatically removed on cgroup destruction but the removal is
3865 * asynchronous, so take an extra ref on @css.
3866 */
3867 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3868 &memory_cgrp_subsys);
3869 ret = -EINVAL;
3870 if (IS_ERR(cfile_css))
3871 goto out_put_cfile;
3872 if (cfile_css != css) {
3873 css_put(cfile_css);
3874 goto out_put_cfile;
3875 }
3876
3877 ret = event->register_event(memcg, event->eventfd, buf);
3878 if (ret)
3879 goto out_put_css;
3880
3881 efile.file->f_op->poll(efile.file, &event->pt);
3882
3883 spin_lock(&memcg->event_list_lock);
3884 list_add(&event->list, &memcg->event_list);
3885 spin_unlock(&memcg->event_list_lock);
3886
3887 fdput(cfile);
3888 fdput(efile);
3889
3890 return nbytes;
3891
3892 out_put_css:
3893 css_put(css);
3894 out_put_cfile:
3895 fdput(cfile);
3896 out_put_eventfd:
3897 eventfd_ctx_put(event->eventfd);
3898 out_put_efile:
3899 fdput(efile);
3900 out_kfree:
3901 kfree(event);
3902
3903 return ret;
3904 }
3905
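/*
 * Illustrative userspace sketch (not part of this file): registering a
 * memory usage threshold through the legacy cgroup.event_control
 * interface parsed above. The cgroup path and the 100MB threshold are
 * hypothetical; for memory.usage_in_bytes the <args> field is a
 * threshold in bytes.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/eventfd.h>
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/sys/fs/cgroup/memory/grp/memory.usage_in_bytes",
 *		       O_RDONLY);
 *	int ecfd = open("/sys/fs/cgroup/memory/grp/cgroup.event_control",
 *			O_WRONLY);
 *	char buf[64];
 *	uint64_t ticks;
 *
 *	snprintf(buf, sizeof(buf), "%d %d %llu", efd, cfd, 100ULL << 20);
 *	write(ecfd, buf, strlen(buf));		(registers the threshold)
 *	read(efd, &ticks, sizeof(ticks));	(blocks until it fires)
 */
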
3906 static struct cftype mem_cgroup_legacy_files[] = {
3907 {
3908 .name = "usage_in_bytes",
3909 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3910 .read_u64 = mem_cgroup_read_u64,
3911 },
3912 {
3913 .name = "max_usage_in_bytes",
3914 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3915 .write = mem_cgroup_reset,
3916 .read_u64 = mem_cgroup_read_u64,
3917 },
3918 {
3919 .name = "limit_in_bytes",
3920 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3921 .write = mem_cgroup_write,
3922 .read_u64 = mem_cgroup_read_u64,
3923 },
3924 {
3925 .name = "soft_limit_in_bytes",
3926 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3927 .write = mem_cgroup_write,
3928 .read_u64 = mem_cgroup_read_u64,
3929 },
3930 {
3931 .name = "failcnt",
3932 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3933 .write = mem_cgroup_reset,
3934 .read_u64 = mem_cgroup_read_u64,
3935 },
3936 {
3937 .name = "stat",
3938 .seq_show = memcg_stat_show,
3939 },
3940 {
3941 .name = "force_empty",
3942 .write = mem_cgroup_force_empty_write,
3943 },
3944 {
3945 .name = "use_hierarchy",
3946 .write_u64 = mem_cgroup_hierarchy_write,
3947 .read_u64 = mem_cgroup_hierarchy_read,
3948 },
3949 {
3950 .name = "cgroup.event_control", /* XXX: for compat */
3951 .write = memcg_write_event_control,
3952 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3953 },
3954 {
3955 .name = "swappiness",
3956 .read_u64 = mem_cgroup_swappiness_read,
3957 .write_u64 = mem_cgroup_swappiness_write,
3958 },
3959 {
3960 .name = "move_charge_at_immigrate",
3961 .read_u64 = mem_cgroup_move_charge_read,
3962 .write_u64 = mem_cgroup_move_charge_write,
3963 },
3964 {
3965 .name = "oom_control",
3966 .seq_show = mem_cgroup_oom_control_read,
3967 .write_u64 = mem_cgroup_oom_control_write,
3968 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3969 },
3970 {
3971 .name = "pressure_level",
3972 },
3973 #ifdef CONFIG_NUMA
3974 {
3975 .name = "numa_stat",
3976 .seq_show = memcg_numa_stat_show,
3977 },
3978 #endif
3979 {
3980 .name = "kmem.limit_in_bytes",
3981 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3982 .write = mem_cgroup_write,
3983 .read_u64 = mem_cgroup_read_u64,
3984 },
3985 {
3986 .name = "kmem.usage_in_bytes",
3987 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3988 .read_u64 = mem_cgroup_read_u64,
3989 },
3990 {
3991 .name = "kmem.failcnt",
3992 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
3993 .write = mem_cgroup_reset,
3994 .read_u64 = mem_cgroup_read_u64,
3995 },
3996 {
3997 .name = "kmem.max_usage_in_bytes",
3998 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
3999 .write = mem_cgroup_reset,
4000 .read_u64 = mem_cgroup_read_u64,
4001 },
4002 #ifdef CONFIG_SLABINFO
4003 {
4004 .name = "kmem.slabinfo",
4005 .seq_start = slab_start,
4006 .seq_next = slab_next,
4007 .seq_stop = slab_stop,
4008 .seq_show = memcg_slab_show,
4009 },
4010 #endif
4011 {
4012 .name = "kmem.tcp.limit_in_bytes",
4013 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4014 .write = mem_cgroup_write,
4015 .read_u64 = mem_cgroup_read_u64,
4016 },
4017 {
4018 .name = "kmem.tcp.usage_in_bytes",
4019 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4020 .read_u64 = mem_cgroup_read_u64,
4021 },
4022 {
4023 .name = "kmem.tcp.failcnt",
4024 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4025 .write = mem_cgroup_reset,
4026 .read_u64 = mem_cgroup_read_u64,
4027 },
4028 {
4029 .name = "kmem.tcp.max_usage_in_bytes",
4030 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4031 .write = mem_cgroup_reset,
4032 .read_u64 = mem_cgroup_read_u64,
4033 },
4034 { }, /* terminate */
4035 };
4036
4037 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4038 {
4039 struct mem_cgroup_per_node *pn;
4040 struct mem_cgroup_per_zone *mz;
4041 int zone, tmp = node;
4042 /*
4043	 * This routine is called against possible nodes, but it's a BUG
4044	 * to call kmalloc() against an offline node.
4045	 *
4046	 * TODO: this routine can waste a lot of memory for nodes which will
4047	 * never be onlined. It would be better to use a memory hotplug
4048	 * callback function.
4049 */
4050 if (!node_state(node, N_NORMAL_MEMORY))
4051 tmp = -1;
4052 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4053 if (!pn)
4054 return 1;
4055
4056 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4057 mz = &pn->zoneinfo[zone];
4058 lruvec_init(&mz->lruvec);
4059 mz->usage_in_excess = 0;
4060 mz->on_tree = false;
4061 mz->memcg = memcg;
4062 }
4063 memcg->nodeinfo[node] = pn;
4064 return 0;
4065 }
4066
4067 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4068 {
4069 kfree(memcg->nodeinfo[node]);
4070 }
4071
4072 static void mem_cgroup_free(struct mem_cgroup *memcg)
4073 {
4074 int node;
4075
4076 memcg_wb_domain_exit(memcg);
4077 for_each_node(node)
4078 free_mem_cgroup_per_zone_info(memcg, node);
4079 free_percpu(memcg->stat);
4080 kfree(memcg);
4081 }
4082
4083 static struct mem_cgroup *mem_cgroup_alloc(void)
4084 {
4085 struct mem_cgroup *memcg;
4086 size_t size;
4087 int node;
4088
4089 size = sizeof(struct mem_cgroup);
4090 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4091
4092 memcg = kzalloc(size, GFP_KERNEL);
4093 if (!memcg)
4094 return NULL;
4095
4096 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4097 if (!memcg->stat)
4098 goto fail;
4099
4100 for_each_node(node)
4101 if (alloc_mem_cgroup_per_zone_info(memcg, node))
4102 goto fail;
4103
4104 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4105 goto fail;
4106
4107 INIT_WORK(&memcg->high_work, high_work_func);
4108 memcg->last_scanned_node = MAX_NUMNODES;
4109 INIT_LIST_HEAD(&memcg->oom_notify);
4110 mutex_init(&memcg->thresholds_lock);
4111 spin_lock_init(&memcg->move_lock);
4112 vmpressure_init(&memcg->vmpressure);
4113 INIT_LIST_HEAD(&memcg->event_list);
4114 spin_lock_init(&memcg->event_list_lock);
4115 memcg->socket_pressure = jiffies;
4116 #ifndef CONFIG_SLOB
4117 memcg->kmemcg_id = -1;
4118 #endif
4119 #ifdef CONFIG_CGROUP_WRITEBACK
4120 INIT_LIST_HEAD(&memcg->cgwb_list);
4121 #endif
4122 return memcg;
4123 fail:
4124 mem_cgroup_free(memcg);
4125 return NULL;
4126 }
4127
4128 static struct cgroup_subsys_state * __ref
4129 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4130 {
4131 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4132 struct mem_cgroup *memcg;
4133 long error = -ENOMEM;
4134
4135 memcg = mem_cgroup_alloc();
4136 if (!memcg)
4137 return ERR_PTR(error);
4138
4139 memcg->high = PAGE_COUNTER_MAX;
4140 memcg->soft_limit = PAGE_COUNTER_MAX;
4141 if (parent) {
4142 memcg->swappiness = mem_cgroup_swappiness(parent);
4143 memcg->oom_kill_disable = parent->oom_kill_disable;
4144 }
4145 if (parent && parent->use_hierarchy) {
4146 memcg->use_hierarchy = true;
4147 page_counter_init(&memcg->memory, &parent->memory);
4148 page_counter_init(&memcg->swap, &parent->swap);
4149 page_counter_init(&memcg->memsw, &parent->memsw);
4150 page_counter_init(&memcg->kmem, &parent->kmem);
4151 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4152 } else {
4153 page_counter_init(&memcg->memory, NULL);
4154 page_counter_init(&memcg->swap, NULL);
4155 page_counter_init(&memcg->memsw, NULL);
4156 page_counter_init(&memcg->kmem, NULL);
4157 page_counter_init(&memcg->tcpmem, NULL);
4158 /*
4159		 * Deeper hierarchy with use_hierarchy == false doesn't make
4160		 * much sense, so let the cgroup subsystem know about this
4161		 * unfortunate state in our controller.
4162 */
4163 if (parent != root_mem_cgroup)
4164 memory_cgrp_subsys.broken_hierarchy = true;
4165 }
4166
4167 /* The following stuff does not apply to the root */
4168 if (!parent) {
4169 root_mem_cgroup = memcg;
4170 return &memcg->css;
4171 }
4172
4173 error = memcg_online_kmem(memcg);
4174 if (error)
4175 goto fail;
4176
4177 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4178 static_branch_inc(&memcg_sockets_enabled_key);
4179
4180 return &memcg->css;
4181 fail:
4182 mem_cgroup_free(memcg);
4183 return NULL;
4184 }
4185
4186 static int
4187 mem_cgroup_css_online(struct cgroup_subsys_state *css)
4188 {
4189 if (css->id > MEM_CGROUP_ID_MAX)
4190 return -ENOSPC;
4191
4192 return 0;
4193 }
4194
4195 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4196 {
4197 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4198 struct mem_cgroup_event *event, *tmp;
4199
4200 /*
4201 * Unregister events and notify userspace.
4202	 * Notify userspace about the cgroup's removal only after rmdir of the
4203	 * cgroup directory, to avoid races between userspace and kernelspace.
4204 */
4205 spin_lock(&memcg->event_list_lock);
4206 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4207 list_del_init(&event->list);
4208 schedule_work(&event->remove);
4209 }
4210 spin_unlock(&memcg->event_list_lock);
4211
4212 memcg_offline_kmem(memcg);
4213 wb_memcg_offline(memcg);
4214 }
4215
4216 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4217 {
4218 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4219
4220 invalidate_reclaim_iterators(memcg);
4221 }
4222
4223 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4224 {
4225 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4226
4227 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4228 static_branch_dec(&memcg_sockets_enabled_key);
4229
4230 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4231 static_branch_dec(&memcg_sockets_enabled_key);
4232
4233 vmpressure_cleanup(&memcg->vmpressure);
4234 cancel_work_sync(&memcg->high_work);
4235 mem_cgroup_remove_from_trees(memcg);
4236 memcg_free_kmem(memcg);
4237 mem_cgroup_free(memcg);
4238 }
4239
4240 /**
4241 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4242 * @css: the target css
4243 *
4244 * Reset the states of the mem_cgroup associated with @css. This is
4245 * invoked when the userland requests disabling on the default hierarchy
4246 * but the memcg is pinned through dependency. The memcg should stop
4247 * applying policies and should revert to the vanilla state as it may be
4248 * made visible again.
4249 *
4250 * The current implementation only resets the essential configurations.
4251 * This needs to be expanded to cover all the visible parts.
4252 */
4253 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4254 {
4255 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4256
4257 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4258 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4259 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4260 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4261 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4262 memcg->low = 0;
4263 memcg->high = PAGE_COUNTER_MAX;
4264 memcg->soft_limit = PAGE_COUNTER_MAX;
4265 memcg_wb_domain_size_changed(memcg);
4266 }
4267
4268 #ifdef CONFIG_MMU
4269 /* Handlers for move charge at task migration. */
4270 static int mem_cgroup_do_precharge(unsigned long count)
4271 {
4272 int ret;
4273
4274 /* Try a single bulk charge without reclaim first, kswapd may wake */
4275 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4276 if (!ret) {
4277 mc.precharge += count;
4278 return ret;
4279 }
4280
4281 /* Try charges one by one with reclaim */
4282 while (count--) {
4283 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
4284 if (ret)
4285 return ret;
4286 mc.precharge++;
4287 cond_resched();
4288 }
4289 return 0;
4290 }
4291
4292 /**
4293 * get_mctgt_type - get target type of moving charge
4294  * @vma: the vma to which the pte to be checked belongs
4295 * @addr: the address corresponding to the pte to be checked
4296 * @ptent: the pte to be checked
4297  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4298 *
4299 * Returns
4300  * 0 (MC_TARGET_NONE): if the pte is not a target for move charge.
4301  * 1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4302  *   move charge. If @target is not NULL, the page is stored in target->page
4303  *   with an extra refcount taken (callers should handle it).
4304  * 2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4305  *   target for charge migration. If @target is not NULL, the entry is stored
4306  *   in target->ent.
4307 *
4308 * Called with pte lock held.
4309 */
4310 union mc_target {
4311 struct page *page;
4312 swp_entry_t ent;
4313 };
4314
4315 enum mc_target_type {
4316 MC_TARGET_NONE = 0,
4317 MC_TARGET_PAGE,
4318 MC_TARGET_SWAP,
4319 };
4320
4321 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4322 unsigned long addr, pte_t ptent)
4323 {
4324 struct page *page = vm_normal_page(vma, addr, ptent);
4325
4326 if (!page || !page_mapped(page))
4327 return NULL;
4328 if (PageAnon(page)) {
4329 if (!(mc.flags & MOVE_ANON))
4330 return NULL;
4331 } else {
4332 if (!(mc.flags & MOVE_FILE))
4333 return NULL;
4334 }
4335 if (!get_page_unless_zero(page))
4336 return NULL;
4337
4338 return page;
4339 }
4340
4341 #ifdef CONFIG_SWAP
4342 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4343 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4344 {
4345 struct page *page = NULL;
4346 swp_entry_t ent = pte_to_swp_entry(ptent);
4347
4348 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4349 return NULL;
4350 /*
4351	 * Because lookup_swap_cache() updates some statistics counters,
4352 * we call find_get_page() with swapper_space directly.
4353 */
4354 page = find_get_page(swap_address_space(ent), ent.val);
4355 if (do_memsw_account())
4356 entry->val = ent.val;
4357
4358 return page;
4359 }
4360 #else
4361 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4362 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4363 {
4364 return NULL;
4365 }
4366 #endif
4367
4368 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4369 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4370 {
4371 struct page *page = NULL;
4372 struct address_space *mapping;
4373 pgoff_t pgoff;
4374
4375 if (!vma->vm_file) /* anonymous vma */
4376 return NULL;
4377 if (!(mc.flags & MOVE_FILE))
4378 return NULL;
4379
4380 mapping = vma->vm_file->f_mapping;
4381 pgoff = linear_page_index(vma, addr);
4382
4383	/* the page is moved even if it's not RSS of this task (page-faulted). */
4384 #ifdef CONFIG_SWAP
4385 /* shmem/tmpfs may report page out on swap: account for that too. */
4386 if (shmem_mapping(mapping)) {
4387 page = find_get_entry(mapping, pgoff);
4388 if (radix_tree_exceptional_entry(page)) {
4389 swp_entry_t swp = radix_to_swp_entry(page);
4390 if (do_memsw_account())
4391 *entry = swp;
4392 page = find_get_page(swap_address_space(swp), swp.val);
4393 }
4394 } else
4395 page = find_get_page(mapping, pgoff);
4396 #else
4397 page = find_get_page(mapping, pgoff);
4398 #endif
4399 return page;
4400 }
4401
4402 /**
4403 * mem_cgroup_move_account - move account of the page
4404 * @page: the page
4405 * @nr_pages: number of regular pages (>1 for huge pages)
4406 * @from: mem_cgroup which the page is moved from.
4407 * @to: mem_cgroup which the page is moved to. @from != @to.
4408 *
4409  * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful.)
4410  *
4411  * This function doesn't do "charge" to the new cgroup and doesn't do "uncharge"
4412  * from the old cgroup.
4413 */
4414 static int mem_cgroup_move_account(struct page *page,
4415 bool compound,
4416 struct mem_cgroup *from,
4417 struct mem_cgroup *to)
4418 {
4419 unsigned long flags;
4420 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4421 int ret;
4422 bool anon;
4423
4424 VM_BUG_ON(from == to);
4425 VM_BUG_ON_PAGE(PageLRU(page), page);
4426 VM_BUG_ON(compound && !PageTransHuge(page));
4427
4428 /*
4429 * Prevent mem_cgroup_migrate() from looking at
4430 * page->mem_cgroup of its source page while we change it.
4431 */
4432 ret = -EBUSY;
4433 if (!trylock_page(page))
4434 goto out;
4435
4436 ret = -EINVAL;
4437 if (page->mem_cgroup != from)
4438 goto out_unlock;
4439
4440 anon = PageAnon(page);
4441
4442 spin_lock_irqsave(&from->move_lock, flags);
4443
4444 if (!anon && page_mapped(page)) {
4445 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4446 nr_pages);
4447 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4448 nr_pages);
4449 }
4450
4451 /*
4452	 * move_lock is grabbed above and the caller has set from->moving_account,
4453	 * so mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4454	 * So the mapping should be stable for dirty pages.
4455 */
4456 if (!anon && PageDirty(page)) {
4457 struct address_space *mapping = page_mapping(page);
4458
4459 if (mapping_cap_account_dirty(mapping)) {
4460 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4461 nr_pages);
4462 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4463 nr_pages);
4464 }
4465 }
4466
4467 if (PageWriteback(page)) {
4468 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4469 nr_pages);
4470 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4471 nr_pages);
4472 }
4473
4474 /*
4475 * It is safe to change page->mem_cgroup here because the page
4476 * is referenced, charged, and isolated - we can't race with
4477 * uncharging, charging, migration, or LRU putback.
4478 */
4479
4480 /* caller should have done css_get */
4481 page->mem_cgroup = to;
4482 spin_unlock_irqrestore(&from->move_lock, flags);
4483
4484 ret = 0;
4485
4486 local_irq_disable();
4487 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4488 memcg_check_events(to, page);
4489 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4490 memcg_check_events(from, page);
4491 local_irq_enable();
4492 out_unlock:
4493 unlock_page(page);
4494 out:
4495 return ret;
4496 }
4497
4498 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4499 unsigned long addr, pte_t ptent, union mc_target *target)
4500 {
4501 struct page *page = NULL;
4502 enum mc_target_type ret = MC_TARGET_NONE;
4503 swp_entry_t ent = { .val = 0 };
4504
4505 if (pte_present(ptent))
4506 page = mc_handle_present_pte(vma, addr, ptent);
4507 else if (is_swap_pte(ptent))
4508 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4509 else if (pte_none(ptent))
4510 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4511
4512 if (!page && !ent.val)
4513 return ret;
4514 if (page) {
4515 /*
4516		 * Only do a loose check, without serialization.
4517		 * mem_cgroup_move_account() checks whether the page is
4518		 * valid under LRU exclusion.
4519 */
4520 if (page->mem_cgroup == mc.from) {
4521 ret = MC_TARGET_PAGE;
4522 if (target)
4523 target->page = page;
4524 }
4525 if (!ret || !target)
4526 put_page(page);
4527 }
4528 /* There is a swap entry and a page doesn't exist or isn't charged */
4529 if (ent.val && !ret &&
4530 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4531 ret = MC_TARGET_SWAP;
4532 if (target)
4533 target->ent = ent;
4534 }
4535 return ret;
4536 }
4537
4538 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4539 /*
4540 * We don't consider swapping or file mapped pages because THP does not
4541 * support them for now.
4542 * Caller should make sure that pmd_trans_huge(pmd) is true.
4543 */
4544 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4545 unsigned long addr, pmd_t pmd, union mc_target *target)
4546 {
4547 struct page *page = NULL;
4548 enum mc_target_type ret = MC_TARGET_NONE;
4549
4550 page = pmd_page(pmd);
4551 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4552 if (!(mc.flags & MOVE_ANON))
4553 return ret;
4554 if (page->mem_cgroup == mc.from) {
4555 ret = MC_TARGET_PAGE;
4556 if (target) {
4557 get_page(page);
4558 target->page = page;
4559 }
4560 }
4561 return ret;
4562 }
4563 #else
4564 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4565 unsigned long addr, pmd_t pmd, union mc_target *target)
4566 {
4567 return MC_TARGET_NONE;
4568 }
4569 #endif
4570
4571 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4572 unsigned long addr, unsigned long end,
4573 struct mm_walk *walk)
4574 {
4575 struct vm_area_struct *vma = walk->vma;
4576 pte_t *pte;
4577 spinlock_t *ptl;
4578
4579 ptl = pmd_trans_huge_lock(pmd, vma);
4580 if (ptl) {
4581 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4582 mc.precharge += HPAGE_PMD_NR;
4583 spin_unlock(ptl);
4584 return 0;
4585 }
4586
4587 if (pmd_trans_unstable(pmd))
4588 return 0;
4589 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4590 for (; addr != end; pte++, addr += PAGE_SIZE)
4591 if (get_mctgt_type(vma, addr, *pte, NULL))
4592 mc.precharge++; /* increment precharge temporarily */
4593 pte_unmap_unlock(pte - 1, ptl);
4594 cond_resched();
4595
4596 return 0;
4597 }
4598
4599 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4600 {
4601 unsigned long precharge;
4602
4603 struct mm_walk mem_cgroup_count_precharge_walk = {
4604 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4605 .mm = mm,
4606 };
4607 down_read(&mm->mmap_sem);
4608 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
4609 up_read(&mm->mmap_sem);
4610
4611 precharge = mc.precharge;
4612 mc.precharge = 0;
4613
4614 return precharge;
4615 }
4616
4617 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4618 {
4619 unsigned long precharge = mem_cgroup_count_precharge(mm);
4620
4621 VM_BUG_ON(mc.moving_task);
4622 mc.moving_task = current;
4623 return mem_cgroup_do_precharge(precharge);
4624 }
4625
4626 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4627 static void __mem_cgroup_clear_mc(void)
4628 {
4629 struct mem_cgroup *from = mc.from;
4630 struct mem_cgroup *to = mc.to;
4631
4632 /* we must uncharge all the leftover precharges from mc.to */
4633 if (mc.precharge) {
4634 cancel_charge(mc.to, mc.precharge);
4635 mc.precharge = 0;
4636 }
4637 /*
4638 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4639 * we must uncharge here.
4640 */
4641 if (mc.moved_charge) {
4642 cancel_charge(mc.from, mc.moved_charge);
4643 mc.moved_charge = 0;
4644 }
4645 /* we must fixup refcnts and charges */
4646 if (mc.moved_swap) {
4647 /* uncharge swap account from the old cgroup */
4648 if (!mem_cgroup_is_root(mc.from))
4649 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4650
4651 /*
4652 * we charged both to->memory and to->memsw, so we
4653 * should uncharge to->memory.
4654 */
4655 if (!mem_cgroup_is_root(mc.to))
4656 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4657
4658 css_put_many(&mc.from->css, mc.moved_swap);
4659
4660 /* we've already done css_get(mc.to) */
4661 mc.moved_swap = 0;
4662 }
4663 memcg_oom_recover(from);
4664 memcg_oom_recover(to);
4665 wake_up_all(&mc.waitq);
4666 }
4667
4668 static void mem_cgroup_clear_mc(void)
4669 {
4670 /*
4671 * we must clear moving_task before waking up waiters at the end of
4672 * task migration.
4673 */
4674 mc.moving_task = NULL;
4675 __mem_cgroup_clear_mc();
4676 spin_lock(&mc.lock);
4677 mc.from = NULL;
4678 mc.to = NULL;
4679 spin_unlock(&mc.lock);
4680 }
4681
4682 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4683 {
4684 struct cgroup_subsys_state *css;
4685 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4686 struct mem_cgroup *from;
4687 struct task_struct *leader, *p;
4688 struct mm_struct *mm;
4689 unsigned long move_flags;
4690 int ret = 0;
4691
4692 /* charge immigration isn't supported on the default hierarchy */
4693 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4694 return 0;
4695
4696 /*
4697 * Multi-process migrations only happen on the default hierarchy
4698 * where charge immigration is not used. Perform charge
4699 * immigration if @tset contains a leader and whine if there are
4700 * multiple.
4701 */
4702 p = NULL;
4703 cgroup_taskset_for_each_leader(leader, css, tset) {
4704 WARN_ON_ONCE(p);
4705 p = leader;
4706 memcg = mem_cgroup_from_css(css);
4707 }
4708 if (!p)
4709 return 0;
4710
4711 /*
4712	 * We are now committed to this value, whatever it is. Changes in this
4713 * tunable will only affect upcoming migrations, not the current one.
4714 * So we need to save it, and keep it going.
4715 */
4716 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4717 if (!move_flags)
4718 return 0;
4719
4720 from = mem_cgroup_from_task(p);
4721
4722 VM_BUG_ON(from == memcg);
4723
4724 mm = get_task_mm(p);
4725 if (!mm)
4726 return 0;
4727	/* We move charges only when we move an owner of the mm */
4728 if (mm->owner == p) {
4729 VM_BUG_ON(mc.from);
4730 VM_BUG_ON(mc.to);
4731 VM_BUG_ON(mc.precharge);
4732 VM_BUG_ON(mc.moved_charge);
4733 VM_BUG_ON(mc.moved_swap);
4734
4735 spin_lock(&mc.lock);
4736 mc.from = from;
4737 mc.to = memcg;
4738 mc.flags = move_flags;
4739 spin_unlock(&mc.lock);
4740 /* We set mc.moving_task later */
4741
4742 ret = mem_cgroup_precharge_mc(mm);
4743 if (ret)
4744 mem_cgroup_clear_mc();
4745 }
4746 mmput(mm);
4747 return ret;
4748 }
4749
4750 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4751 {
4752 if (mc.to)
4753 mem_cgroup_clear_mc();
4754 }
4755
4756 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4757 unsigned long addr, unsigned long end,
4758 struct mm_walk *walk)
4759 {
4760 int ret = 0;
4761 struct vm_area_struct *vma = walk->vma;
4762 pte_t *pte;
4763 spinlock_t *ptl;
4764 enum mc_target_type target_type;
4765 union mc_target target;
4766 struct page *page;
4767
4768 ptl = pmd_trans_huge_lock(pmd, vma);
4769 if (ptl) {
4770 if (mc.precharge < HPAGE_PMD_NR) {
4771 spin_unlock(ptl);
4772 return 0;
4773 }
4774 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4775 if (target_type == MC_TARGET_PAGE) {
4776 page = target.page;
4777 if (!isolate_lru_page(page)) {
4778 if (!mem_cgroup_move_account(page, true,
4779 mc.from, mc.to)) {
4780 mc.precharge -= HPAGE_PMD_NR;
4781 mc.moved_charge += HPAGE_PMD_NR;
4782 }
4783 putback_lru_page(page);
4784 }
4785 put_page(page);
4786 }
4787 spin_unlock(ptl);
4788 return 0;
4789 }
4790
4791 if (pmd_trans_unstable(pmd))
4792 return 0;
4793 retry:
4794 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4795 for (; addr != end; addr += PAGE_SIZE) {
4796 pte_t ptent = *(pte++);
4797 swp_entry_t ent;
4798
4799 if (!mc.precharge)
4800 break;
4801
4802 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4803 case MC_TARGET_PAGE:
4804 page = target.page;
4805 /*
4806 * We can have a part of the split pmd here. Moving it
4807 * can be done but it would be too convoluted so simply
4808			 * ignore such a partial THP and keep it in the original
4809 * memcg. There should be somebody mapping the head.
4810 */
4811 if (PageTransCompound(page))
4812 goto put;
4813 if (isolate_lru_page(page))
4814 goto put;
4815 if (!mem_cgroup_move_account(page, false,
4816 mc.from, mc.to)) {
4817 mc.precharge--;
4818 /* we uncharge from mc.from later. */
4819 mc.moved_charge++;
4820 }
4821 putback_lru_page(page);
4822 put: /* get_mctgt_type() gets the page */
4823 put_page(page);
4824 break;
4825 case MC_TARGET_SWAP:
4826 ent = target.ent;
4827 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4828 mc.precharge--;
4829 /* we fixup refcnts and charges later. */
4830 mc.moved_swap++;
4831 }
4832 break;
4833 default:
4834 break;
4835 }
4836 }
4837 pte_unmap_unlock(pte - 1, ptl);
4838 cond_resched();
4839
4840 if (addr != end) {
4841 /*
4842 * We have consumed all precharges we got in can_attach().
4843		 * We try to charge one by one, but we don't do any additional
4844		 * charges to mc.to if we have already failed to charge once in the
4845		 * attach() phase.
4846 */
4847 ret = mem_cgroup_do_precharge(1);
4848 if (!ret)
4849 goto retry;
4850 }
4851
4852 return ret;
4853 }
4854
4855 static void mem_cgroup_move_charge(struct mm_struct *mm)
4856 {
4857 struct mm_walk mem_cgroup_move_charge_walk = {
4858 .pmd_entry = mem_cgroup_move_charge_pte_range,
4859 .mm = mm,
4860 };
4861
4862 lru_add_drain_all();
4863 /*
4864 * Signal lock_page_memcg() to take the memcg's move_lock
4865 * while we're moving its pages to another memcg. Then wait
4866 * for already started RCU-only updates to finish.
4867 */
4868 atomic_inc(&mc.from->moving_account);
4869 synchronize_rcu();
4870 retry:
4871 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
4872 /*
4873		 * Someone who is holding the mmap_sem might be waiting in
4874 * waitq. So we cancel all extra charges, wake up all waiters,
4875 * and retry. Because we cancel precharges, we might not be able
4876 * to move enough charges, but moving charge is a best-effort
4877 * feature anyway, so it wouldn't be a big problem.
4878 */
4879 __mem_cgroup_clear_mc();
4880 cond_resched();
4881 goto retry;
4882 }
4883 /*
4884	 * When we have consumed all precharges and failed to charge
4885	 * any more, the page walk just aborts.
4886 */
4887 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
4888 up_read(&mm->mmap_sem);
4889 atomic_dec(&mc.from->moving_account);
4890 }
4891
4892 static void mem_cgroup_move_task(struct cgroup_taskset *tset)
4893 {
4894 struct cgroup_subsys_state *css;
4895 struct task_struct *p = cgroup_taskset_first(tset, &css);
4896 struct mm_struct *mm = get_task_mm(p);
4897
4898 if (mm) {
4899 if (mc.to)
4900 mem_cgroup_move_charge(mm);
4901 mmput(mm);
4902 }
4903 if (mc.to)
4904 mem_cgroup_clear_mc();
4905 }
4906 #else /* !CONFIG_MMU */
4907 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4908 {
4909 return 0;
4910 }
4911 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4912 {
4913 }
4914 static void mem_cgroup_move_task(struct cgroup_taskset *tset)
4915 {
4916 }
4917 #endif
4918
4919 /*
4920  * Cgroup retains root cgroups across [un]mount cycles, making it necessary
4921 * to verify whether we're attached to the default hierarchy on each mount
4922 * attempt.
4923 */
4924 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
4925 {
4926 /*
4927 * use_hierarchy is forced on the default hierarchy. cgroup core
4928 * guarantees that @root doesn't have any children, so turning it
4929 * on for the root memcg is enough.
4930 */
4931 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4932 root_mem_cgroup->use_hierarchy = true;
4933 else
4934 root_mem_cgroup->use_hierarchy = false;
4935 }
4936
4937 static u64 memory_current_read(struct cgroup_subsys_state *css,
4938 struct cftype *cft)
4939 {
4940 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4941
4942 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4943 }
4944
4945 static int memory_low_show(struct seq_file *m, void *v)
4946 {
4947 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4948 unsigned long low = READ_ONCE(memcg->low);
4949
4950 if (low == PAGE_COUNTER_MAX)
4951 seq_puts(m, "max\n");
4952 else
4953 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
4954
4955 return 0;
4956 }
4957
4958 static ssize_t memory_low_write(struct kernfs_open_file *of,
4959 char *buf, size_t nbytes, loff_t off)
4960 {
4961 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4962 unsigned long low;
4963 int err;
4964
4965 buf = strstrip(buf);
4966 err = page_counter_memparse(buf, "max", &low);
4967 if (err)
4968 return err;
4969
4970 memcg->low = low;
4971
4972 return nbytes;
4973 }
4974
4975 static int memory_high_show(struct seq_file *m, void *v)
4976 {
4977 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4978 unsigned long high = READ_ONCE(memcg->high);
4979
4980 if (high == PAGE_COUNTER_MAX)
4981 seq_puts(m, "max\n");
4982 else
4983 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
4984
4985 return 0;
4986 }
4987
4988 static ssize_t memory_high_write(struct kernfs_open_file *of,
4989 char *buf, size_t nbytes, loff_t off)
4990 {
4991 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
4992 unsigned long nr_pages;
4993 unsigned long high;
4994 int err;
4995
4996 buf = strstrip(buf);
4997 err = page_counter_memparse(buf, "max", &high);
4998 if (err)
4999 return err;
5000
5001 memcg->high = high;
5002
5003 nr_pages = page_counter_read(&memcg->memory);
5004 if (nr_pages > high)
5005 try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5006 GFP_KERNEL, true);
5007
5008 memcg_wb_domain_size_changed(memcg);
5009 return nbytes;
5010 }
5011
5012 static int memory_max_show(struct seq_file *m, void *v)
5013 {
5014 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5015 unsigned long max = READ_ONCE(memcg->memory.limit);
5016
5017 if (max == PAGE_COUNTER_MAX)
5018 seq_puts(m, "max\n");
5019 else
5020 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5021
5022 return 0;
5023 }
5024
5025 static ssize_t memory_max_write(struct kernfs_open_file *of,
5026 char *buf, size_t nbytes, loff_t off)
5027 {
5028 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5029 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5030 bool drained = false;
5031 unsigned long max;
5032 int err;
5033
5034 buf = strstrip(buf);
5035 err = page_counter_memparse(buf, "max", &max);
5036 if (err)
5037 return err;
5038
5039 xchg(&memcg->memory.limit, max);
5040
5041 for (;;) {
5042 unsigned long nr_pages = page_counter_read(&memcg->memory);
5043
5044 if (nr_pages <= max)
5045 break;
5046
5047 if (signal_pending(current)) {
5048 err = -EINTR;
5049 break;
5050 }
5051
5052 if (!drained) {
5053 drain_all_stock(memcg);
5054 drained = true;
5055 continue;
5056 }
5057
5058 if (nr_reclaims) {
5059 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5060 GFP_KERNEL, true))
5061 nr_reclaims--;
5062 continue;
5063 }
5064
5065 mem_cgroup_events(memcg, MEMCG_OOM, 1);
5066 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5067 break;
5068 }
5069
5070 memcg_wb_domain_size_changed(memcg);
5071 return nbytes;
5072 }
5073
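/*
 * Illustrative usage note (not part of this file): memory.max accepts
 * either the literal string "max" or a byte count understood by
 * page_counter_memparse() (plain bytes or K/M/G suffixes). Lowering the
 * limit runs the drain/reclaim/OOM loop above until usage fits. The
 * cgroup path below is hypothetical.
 *
 *	int fd = open("/sys/fs/cgroup/grp/memory.max", O_WRONLY);
 *
 *	write(fd, "100M", 4);	(set a 100MB limit, reclaiming if needed)
 *	write(fd, "max", 3);	(remove the limit again)
 */
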
5074 static int memory_events_show(struct seq_file *m, void *v)
5075 {
5076 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5077
5078 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5079 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5080 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5081 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5082
5083 return 0;
5084 }
5085
5086 static int memory_stat_show(struct seq_file *m, void *v)
5087 {
5088 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5089 unsigned long stat[MEMCG_NR_STAT];
5090 unsigned long events[MEMCG_NR_EVENTS];
5091 int i;
5092
5093 /*
5094 * Provide statistics on the state of the memory subsystem as
5095 * well as cumulative event counters that show past behavior.
5096 *
5097 * This list is ordered following a combination of these gradients:
5098 * 1) generic big picture -> specifics and details
5099 * 2) reflecting userspace activity -> reflecting kernel heuristics
5100 *
5101 * Current memory state:
5102 */
5103
5104 tree_stat(memcg, stat);
5105 tree_events(memcg, events);
5106
5107 seq_printf(m, "anon %llu\n",
5108 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
5109 seq_printf(m, "file %llu\n",
5110 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
5111 seq_printf(m, "kernel_stack %llu\n",
5112 (u64)stat[MEMCG_KERNEL_STACK] * PAGE_SIZE);
5113 seq_printf(m, "slab %llu\n",
5114 (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
5115 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5116 seq_printf(m, "sock %llu\n",
5117 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5118
5119 seq_printf(m, "file_mapped %llu\n",
5120 (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
5121 seq_printf(m, "file_dirty %llu\n",
5122 (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
5123 seq_printf(m, "file_writeback %llu\n",
5124 (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
5125
5126 for (i = 0; i < NR_LRU_LISTS; i++) {
5127 struct mem_cgroup *mi;
5128 unsigned long val = 0;
5129
5130 for_each_mem_cgroup_tree(mi, memcg)
5131 val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5132 seq_printf(m, "%s %llu\n",
5133 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5134 }
5135
5136 seq_printf(m, "slab_reclaimable %llu\n",
5137 (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
5138 seq_printf(m, "slab_unreclaimable %llu\n",
5139 (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5140
5141 /* Accumulated memory events */
5142
5143 seq_printf(m, "pgfault %lu\n",
5144 events[MEM_CGROUP_EVENTS_PGFAULT]);
5145 seq_printf(m, "pgmajfault %lu\n",
5146 events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
5147
5148 return 0;
5149 }
5150
5151 static struct cftype memory_files[] = {
5152 {
5153 .name = "current",
5154 .flags = CFTYPE_NOT_ON_ROOT,
5155 .read_u64 = memory_current_read,
5156 },
5157 {
5158 .name = "low",
5159 .flags = CFTYPE_NOT_ON_ROOT,
5160 .seq_show = memory_low_show,
5161 .write = memory_low_write,
5162 },
5163 {
5164 .name = "high",
5165 .flags = CFTYPE_NOT_ON_ROOT,
5166 .seq_show = memory_high_show,
5167 .write = memory_high_write,
5168 },
5169 {
5170 .name = "max",
5171 .flags = CFTYPE_NOT_ON_ROOT,
5172 .seq_show = memory_max_show,
5173 .write = memory_max_write,
5174 },
5175 {
5176 .name = "events",
5177 .flags = CFTYPE_NOT_ON_ROOT,
5178 .file_offset = offsetof(struct mem_cgroup, events_file),
5179 .seq_show = memory_events_show,
5180 },
5181 {
5182 .name = "stat",
5183 .flags = CFTYPE_NOT_ON_ROOT,
5184 .seq_show = memory_stat_show,
5185 },
5186 { } /* terminate */
5187 };
5188
5189 struct cgroup_subsys memory_cgrp_subsys = {
5190 .css_alloc = mem_cgroup_css_alloc,
5191 .css_online = mem_cgroup_css_online,
5192 .css_offline = mem_cgroup_css_offline,
5193 .css_released = mem_cgroup_css_released,
5194 .css_free = mem_cgroup_css_free,
5195 .css_reset = mem_cgroup_css_reset,
5196 .can_attach = mem_cgroup_can_attach,
5197 .cancel_attach = mem_cgroup_cancel_attach,
5198 .attach = mem_cgroup_move_task,
5199 .bind = mem_cgroup_bind,
5200 .dfl_cftypes = memory_files,
5201 .legacy_cftypes = mem_cgroup_legacy_files,
5202 .early_init = 0,
5203 };
5204
5205 /**
5206 * mem_cgroup_low - check if memory consumption is below the normal range
5207 * @root: the highest ancestor to consider
5208 * @memcg: the memory cgroup to check
5209 *
5210 * Returns %true if memory consumption of @memcg, and that of all
5211 * configurable ancestors up to @root, is below the normal range.
5212 */
5213 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5214 {
5215 if (mem_cgroup_disabled())
5216 return false;
5217
5218 /*
5219 * The toplevel group doesn't have a configurable range, so
5220 * it's never low when looked at directly, and it is not
5221 * considered an ancestor when assessing the hierarchy.
5222 */
5223
5224 if (memcg == root_mem_cgroup)
5225 return false;
5226
5227 if (page_counter_read(&memcg->memory) >= memcg->low)
5228 return false;
5229
5230 while (memcg != root) {
5231 memcg = parent_mem_cgroup(memcg);
5232
5233 if (memcg == root_mem_cgroup)
5234 break;
5235
5236 if (page_counter_read(&memcg->memory) >= memcg->low)
5237 return false;
5238 }
5239 return true;
5240 }
5241
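/*
 * Illustrative sketch (not part of this file) of how a reclaim path
 * might consult this predicate; scan_one_memcg() is a hypothetical
 * helper standing in for the real per-memcg reclaim work.
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (mem_cgroup_low(root, iter))
 *			continue;	(consumption below the protected range)
 *		scan_one_memcg(iter);
 *	}
 */
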
5242 /**
5243 * mem_cgroup_try_charge - try charging a page
5244 * @page: page to charge
5245 * @mm: mm context of the victim
5246 * @gfp_mask: reclaim mode
5247 * @memcgp: charged memcg return
5248 *
5249 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5250 * pages according to @gfp_mask if necessary.
5251 *
5252 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5253 * Otherwise, an error code is returned.
5254 *
5255 * After page->mapping has been set up, the caller must finalize the
5256 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5257 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5258 */
5259 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5260 gfp_t gfp_mask, struct mem_cgroup **memcgp,
5261 bool compound)
5262 {
5263 struct mem_cgroup *memcg = NULL;
5264 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5265 int ret = 0;
5266
5267 if (mem_cgroup_disabled())
5268 goto out;
5269
5270 if (PageSwapCache(page)) {
5271 /*
5272 * Every swap fault against a single page tries to charge the
5273		 * page, so bail as early as possible. shmem_unuse() encounters
5274 * already charged pages, too. The USED bit is protected by
5275 * the page lock, which serializes swap cache removal, which
5276 * in turn serializes uncharging.
5277 */
5278 VM_BUG_ON_PAGE(!PageLocked(page), page);
5279 if (page->mem_cgroup)
5280 goto out;
5281
5282 if (do_swap_account) {
5283 swp_entry_t ent = { .val = page_private(page), };
5284 unsigned short id = lookup_swap_cgroup_id(ent);
5285
5286 rcu_read_lock();
5287 memcg = mem_cgroup_from_id(id);
5288 if (memcg && !css_tryget_online(&memcg->css))
5289 memcg = NULL;
5290 rcu_read_unlock();
5291 }
5292 }
5293
5294 if (!memcg)
5295 memcg = get_mem_cgroup_from_mm(mm);
5296
5297 ret = try_charge(memcg, gfp_mask, nr_pages);
5298
5299 css_put(&memcg->css);
5300 out:
5301 *memcgp = memcg;
5302 return ret;
5303 }
5304
5305 /**
5306 * mem_cgroup_commit_charge - commit a page charge
5307 * @page: page to charge
5308 * @memcg: memcg to charge the page to
5309 * @lrucare: page might be on LRU already
5310 *
5311 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5312 * after page->mapping has been set up. This must happen atomically
5313 * as part of the page instantiation, i.e. under the page table lock
5314 * for anonymous pages, under the page lock for page and swap cache.
5315 *
5316 * In addition, the page must not be on the LRU during the commit, to
5317 * prevent racing with task migration. If it might be, use @lrucare.
5318 *
5319 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5320 */
5321 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5322 bool lrucare, bool compound)
5323 {
5324 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5325
5326 VM_BUG_ON_PAGE(!page->mapping, page);
5327 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5328
5329 if (mem_cgroup_disabled())
5330 return;
5331 /*
5332 * Swap faults will attempt to charge the same page multiple
5333 * times. But reuse_swap_page() might have removed the page
5334 * from swapcache already, so we can't check PageSwapCache().
5335 */
5336 if (!memcg)
5337 return;
5338
5339 commit_charge(page, memcg, lrucare);
5340
5341 local_irq_disable();
5342 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
5343 memcg_check_events(memcg, page);
5344 local_irq_enable();
5345
5346 if (do_memsw_account() && PageSwapCache(page)) {
5347 swp_entry_t entry = { .val = page_private(page) };
5348 /*
5349 * The swap entry might not get freed for a long time,
5350 * let's not wait for it. The page already received a
5351 * memory+swap charge, drop the swap entry duplicate.
5352 */
5353 mem_cgroup_uncharge_swap(entry);
5354 }
5355 }
5356
5357 /**
5358 * mem_cgroup_cancel_charge - cancel a page charge
5359 * @page: page to charge
5360 * @memcg: memcg to charge the page to
5361 *
5362 * Cancel a charge transaction started by mem_cgroup_try_charge().
5363 */
5364 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5365 bool compound)
5366 {
5367 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5368
5369 if (mem_cgroup_disabled())
5370 return;
5371 /*
5372 * Swap faults will attempt to charge the same page multiple
5373 * times. But reuse_swap_page() might have removed the page
5374 * from swapcache already, so we can't check PageSwapCache().
5375 */
5376 if (!memcg)
5377 return;
5378
5379 cancel_charge(memcg, nr_pages);
5380 }
5381
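/*
 * Illustrative sketch (not part of this file) of the charge transaction
 * described above, roughly as a fault path would drive it;
 * my_map_page() is a hypothetical step standing in for setting up
 * page->mapping and the page tables.
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		return -ENOMEM;
 *
 *	if (my_map_page(page)) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		return -EFAULT;
 *	}
 *
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 */
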
5382 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
5383 unsigned long nr_anon, unsigned long nr_file,
5384 unsigned long nr_huge, struct page *dummy_page)
5385 {
5386 unsigned long nr_pages = nr_anon + nr_file;
5387 unsigned long flags;
5388
5389 if (!mem_cgroup_is_root(memcg)) {
5390 page_counter_uncharge(&memcg->memory, nr_pages);
5391 if (do_memsw_account())
5392 page_counter_uncharge(&memcg->memsw, nr_pages);
5393 memcg_oom_recover(memcg);
5394 }
5395
5396 local_irq_save(flags);
5397 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5398 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5399 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5400 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
5401 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
5402 memcg_check_events(memcg, dummy_page);
5403 local_irq_restore(flags);
5404
5405 if (!mem_cgroup_is_root(memcg))
5406 css_put_many(&memcg->css, nr_pages);
5407 }
5408
5409 static void uncharge_list(struct list_head *page_list)
5410 {
5411 struct mem_cgroup *memcg = NULL;
5412 unsigned long nr_anon = 0;
5413 unsigned long nr_file = 0;
5414 unsigned long nr_huge = 0;
5415 unsigned long pgpgout = 0;
5416 struct list_head *next;
5417 struct page *page;
5418
5419 /*
5420 * Note that the list can be a single page->lru; hence the
5421 * do-while loop instead of a simple list_for_each_entry().
5422 */
5423 next = page_list->next;
5424 do {
5425 unsigned int nr_pages = 1;
5426
5427 page = list_entry(next, struct page, lru);
5428 next = page->lru.next;
5429
5430 VM_BUG_ON_PAGE(PageLRU(page), page);
5431 VM_BUG_ON_PAGE(page_count(page), page);
5432
5433 if (!page->mem_cgroup)
5434 continue;
5435
5436 /*
5437 * Nobody should be changing or seriously looking at
5438		 * page->mem_cgroup at this point; we have fully
5439 * exclusive access to the page.
5440 */
5441
5442 if (memcg != page->mem_cgroup) {
5443 if (memcg) {
5444 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5445 nr_huge, page);
5446 pgpgout = nr_anon = nr_file = nr_huge = 0;
5447 }
5448 memcg = page->mem_cgroup;
5449 }
5450
5451 if (PageTransHuge(page)) {
5452 nr_pages <<= compound_order(page);
5453 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5454 nr_huge += nr_pages;
5455 }
5456
5457 if (PageAnon(page))
5458 nr_anon += nr_pages;
5459 else
5460 nr_file += nr_pages;
5461
5462 page->mem_cgroup = NULL;
5463
5464 pgpgout++;
5465 } while (next != page_list);
5466
5467 if (memcg)
5468 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5469 nr_huge, page);
5470 }
5471
5472 /**
5473 * mem_cgroup_uncharge - uncharge a page
5474 * @page: page to uncharge
5475 *
5476 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5477 * mem_cgroup_commit_charge().
5478 */
5479 void mem_cgroup_uncharge(struct page *page)
5480 {
5481 if (mem_cgroup_disabled())
5482 return;
5483
5484 /* Don't touch page->lru of any random page, pre-check: */
5485 if (!page->mem_cgroup)
5486 return;
5487
5488 INIT_LIST_HEAD(&page->lru);
5489 uncharge_list(&page->lru);
5490 }
5491
5492 /**
5493 * mem_cgroup_uncharge_list - uncharge a list of page
5494 * @page_list: list of pages to uncharge
5495 *
5496 * Uncharge a list of pages previously charged with
5497 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5498 */
5499 void mem_cgroup_uncharge_list(struct list_head *page_list)
5500 {
5501 if (mem_cgroup_disabled())
5502 return;
5503
5504 if (!list_empty(page_list))
5505 uncharge_list(page_list);
5506 }
5507
5508 /**
5509 * mem_cgroup_migrate - charge a page's replacement
5510 * @oldpage: currently circulating page
5511 * @newpage: replacement page
5512 *
5513 * Charge @newpage as a replacement page for @oldpage. @oldpage will
5514 * be uncharged upon free.
5515 *
5516 * Both pages must be locked, @newpage->mapping must be set up.
5517 */
5518 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
5519 {
5520 struct mem_cgroup *memcg;
5521 unsigned int nr_pages;
5522 bool compound;
5523
5524 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5525 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5526 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5527 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5528 newpage);
5529
5530 if (mem_cgroup_disabled())
5531 return;
5532
5533 /* Page cache replacement: new page already charged? */
5534 if (newpage->mem_cgroup)
5535 return;
5536
5537 /* Swapcache readahead pages can get replaced before being charged */
5538 memcg = oldpage->mem_cgroup;
5539 if (!memcg)
5540 return;
5541
5542 /* Force-charge the new page. The old one will be freed soon */
5543 compound = PageTransHuge(newpage);
5544 nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5545
5546 page_counter_charge(&memcg->memory, nr_pages);
5547 if (do_memsw_account())
5548 page_counter_charge(&memcg->memsw, nr_pages);
5549 css_get_many(&memcg->css, nr_pages);
5550
5551 commit_charge(newpage, memcg, false);
5552
5553 local_irq_disable();
5554 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5555 memcg_check_events(memcg, newpage);
5556 local_irq_enable();
5557 }
5558
5559 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5560 EXPORT_SYMBOL(memcg_sockets_enabled_key);
5561
5562 void sock_update_memcg(struct sock *sk)
5563 {
5564 struct mem_cgroup *memcg;
5565
5566	/* Socket cloning can throw us here with sk_memcg already
5567	 * filled. It won't, however, necessarily happen from
5568 * process context. So the test for root memcg given
5569 * the current task's memcg won't help us in this case.
5570 *
5571 * Respecting the original socket's memcg is a better
5572 * decision in this case.
5573 */
5574 if (sk->sk_memcg) {
5575 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5576 css_get(&sk->sk_memcg->css);
5577 return;
5578 }
5579
5580 rcu_read_lock();
5581 memcg = mem_cgroup_from_task(current);
5582 if (memcg == root_mem_cgroup)
5583 goto out;
5584 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
5585 goto out;
5586 if (css_tryget_online(&memcg->css))
5587 sk->sk_memcg = memcg;
5588 out:
5589 rcu_read_unlock();
5590 }
5591 EXPORT_SYMBOL(sock_update_memcg);
5592
5593 void sock_release_memcg(struct sock *sk)
5594 {
5595 WARN_ON(!sk->sk_memcg);
5596 css_put(&sk->sk_memcg->css);
5597 }
5598
5599 /**
5600 * mem_cgroup_charge_skmem - charge socket memory
5601 * @memcg: memcg to charge
5602 * @nr_pages: number of pages to charge
5603 *
5604 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5605 * @memcg's configured limit, %false if the charge had to be forced.
5606 */
5607 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5608 {
5609 gfp_t gfp_mask = GFP_KERNEL;
5610
5611 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5612 struct page_counter *fail;
5613
5614 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5615 memcg->tcpmem_pressure = 0;
5616 return true;
5617 }
5618 page_counter_charge(&memcg->tcpmem, nr_pages);
5619 memcg->tcpmem_pressure = 1;
5620 return false;
5621 }
5622
5623 /* Don't block in the packet receive path */
5624 if (in_softirq())
5625 gfp_mask = GFP_NOWAIT;
5626
5627 this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
5628
5629 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5630 return true;
5631
5632 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
5633 return false;
5634 }
5635
5636 /**
5637 * mem_cgroup_uncharge_skmem - uncharge socket memory
5638  * @memcg: memcg to uncharge
5639  * @nr_pages: number of pages to uncharge
5640 */
5641 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5642 {
5643 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5644 page_counter_uncharge(&memcg->tcpmem, nr_pages);
5645 return;
5646 }
5647
5648 this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
5649
5650 page_counter_uncharge(&memcg->memory, nr_pages);
5651 css_put_many(&memcg->css, nr_pages);
5652 }
5653
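/*
 * Illustrative sketch (not part of this file): how networking code
 * might account socket buffer memory against the socket's memcg,
 * assuming sk->sk_memcg was set up by sock_update_memcg() above;
 * enter_memory_pressure() is a hypothetical pressure hook.
 *
 *	if (sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
 *		enter_memory_pressure(sk);
 *	...
 *	if (sk->sk_memcg)
 *		mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
 */
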
5654 static int __init cgroup_memory(char *s)
5655 {
5656 char *token;
5657
5658 while ((token = strsep(&s, ",")) != NULL) {
5659 if (!*token)
5660 continue;
5661 if (!strcmp(token, "nosocket"))
5662 cgroup_memory_nosocket = true;
5663 if (!strcmp(token, "nokmem"))
5664 cgroup_memory_nokmem = true;
5665 }
5666 return 0;
5667 }
5668 __setup("cgroup.memory=", cgroup_memory);
5669
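/*
 * Example (illustrative): booting with
 *
 *	cgroup.memory=nosocket,nokmem
 *
 * makes the parser above set both cgroup_memory_nosocket and
 * cgroup_memory_nokmem, disabling socket and kernel memory accounting.
 */
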
5670 /*
5671 * subsys_initcall() for memory controller.
5672 *
5673 * Some parts like hotcpu_notifier() have to be initialized from this context
5674 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
5675 * everything that doesn't depend on a specific mem_cgroup structure should
5676 * be initialized from here.
5677 */
5678 static int __init mem_cgroup_init(void)
5679 {
5680 int cpu, node;
5681
5682 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
5683
5684 for_each_possible_cpu(cpu)
5685 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5686 drain_local_stock);
5687
5688 for_each_node(node) {
5689 struct mem_cgroup_tree_per_node *rtpn;
5690 int zone;
5691
5692 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5693 node_online(node) ? node : NUMA_NO_NODE);
5694
5695 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5696 struct mem_cgroup_tree_per_zone *rtpz;
5697
5698 rtpz = &rtpn->rb_tree_per_zone[zone];
5699 rtpz->rb_root = RB_ROOT;
5700 spin_lock_init(&rtpz->lock);
5701 }
5702 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5703 }
5704
5705 return 0;
5706 }
5707 subsys_initcall(mem_cgroup_init);
5708
5709 #ifdef CONFIG_MEMCG_SWAP
5710 /**
5711 * mem_cgroup_swapout - transfer a memsw charge to swap
5712 * @page: page whose memsw charge to transfer
5713 * @entry: swap entry to move the charge to
5714 *
5715 * Transfer the memsw charge of @page to @entry.
5716 */
5717 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5718 {
5719 struct mem_cgroup *memcg;
5720 unsigned short oldid;
5721
5722 VM_BUG_ON_PAGE(PageLRU(page), page);
5723 VM_BUG_ON_PAGE(page_count(page), page);
5724
5725 if (!do_memsw_account())
5726 return;
5727
5728 memcg = page->mem_cgroup;
5729
5730 /* Readahead page, never charged */
5731 if (!memcg)
5732 return;
5733
5734 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5735 VM_BUG_ON_PAGE(oldid, page);
5736 mem_cgroup_swap_statistics(memcg, true);
5737
5738 page->mem_cgroup = NULL;
5739
5740 if (!mem_cgroup_is_root(memcg))
5741 page_counter_uncharge(&memcg->memory, 1);
5742
5743 /*
5744 * Interrupts should be disabled here because the caller holds the
5745	 * mapping->tree_lock, which is taken with interrupts off. It is
5746	 * important here to have interrupts disabled because it is the
5747	 * only synchronisation we have for updating the per-CPU variables.
5748 */
5749 VM_BUG_ON(!irqs_disabled());
5750 mem_cgroup_charge_statistics(memcg, page, false, -1);
5751 memcg_check_events(memcg, page);
5752 }
5753
5754 /*
5755 * mem_cgroup_try_charge_swap - try charging a swap entry
5756 * @page: page being added to swap
5757 * @entry: swap entry to charge
5758 *
5759 * Try to charge @entry to the memcg that @page belongs to.
5760 *
5761 * Returns 0 on success, -ENOMEM on failure.
5762 */
5763 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
5764 {
5765 struct mem_cgroup *memcg;
5766 struct page_counter *counter;
5767 unsigned short oldid;
5768
5769 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
5770 return 0;
5771
5772 memcg = page->mem_cgroup;
5773
5774 /* Readahead page, never charged */
5775 if (!memcg)
5776 return 0;
5777
5778 if (!mem_cgroup_is_root(memcg) &&
5779 !page_counter_try_charge(&memcg->swap, 1, &counter))
5780 return -ENOMEM;
5781
5782 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5783 VM_BUG_ON_PAGE(oldid, page);
5784 mem_cgroup_swap_statistics(memcg, true);
5785
5786 css_get(&memcg->css);
5787 return 0;
5788 }
5789
5790 /**
5791 * mem_cgroup_uncharge_swap - uncharge a swap entry
5792 * @entry: swap entry to uncharge
5793 *
5794 * Drop the swap charge associated with @entry.
5795 */
5796 void mem_cgroup_uncharge_swap(swp_entry_t entry)
5797 {
5798 struct mem_cgroup *memcg;
5799 unsigned short id;
5800
5801 if (!do_swap_account)
5802 return;
5803
5804 id = swap_cgroup_record(entry, 0);
5805 rcu_read_lock();
5806 memcg = mem_cgroup_from_id(id);
5807 if (memcg) {
5808 if (!mem_cgroup_is_root(memcg)) {
5809 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5810 page_counter_uncharge(&memcg->swap, 1);
5811 else
5812 page_counter_uncharge(&memcg->memsw, 1);
5813 }
5814 mem_cgroup_swap_statistics(memcg, false);
5815 css_put(&memcg->css);
5816 }
5817 rcu_read_unlock();
5818 }
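
/*
 * Illustrative sketch (not part of this file): mem_cgroup_uncharge_swap()
 * pairs with the two charge paths above and is expected to run once a
 * swap slot is finally released. The hypothetical helper below shows only
 * that pairing.
 */
#if 0
static void example_free_swap_slot(swp_entry_t entry)
{
	/* drop the swap (or memsw) charge recorded for this slot */
	mem_cgroup_uncharge_swap(entry);
	/* ... return the slot to the swap allocator ... */
}
#endif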
5819
5820 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5821 {
5822 long nr_swap_pages = get_nr_swap_pages();
5823
5824 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5825 return nr_swap_pages;
5826 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5827 nr_swap_pages = min_t(long, nr_swap_pages,
5828 READ_ONCE(memcg->swap.limit) -
5829 page_counter_read(&memcg->swap));
5830 return nr_swap_pages;
5831 }
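
/*
 * Worked example (illustrative): with 4096 free swap pages globally, a
 * memcg whose swap.limit is 1024 pages with 256 already in use, under a
 * parent limited to 512 pages with 128 in use, is allowed
 * min(4096, 1024 - 256, 512 - 128) = 384 further swap pages.
 */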
5832
5833 bool mem_cgroup_swap_full(struct page *page)
5834 {
5835 struct mem_cgroup *memcg;
5836
5837 VM_BUG_ON_PAGE(!PageLocked(page), page);
5838
5839 if (vm_swap_full())
5840 return true;
5841 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5842 return false;
5843
5844 memcg = page->mem_cgroup;
5845 if (!memcg)
5846 return false;
5847
5848 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5849 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
5850 return true;
5851
5852 return false;
5853 }
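
/*
 * Worked example (illustrative): a page in a memcg whose swap.limit is
 * 200 pages while 120 pages of its swap are in use satisfies
 * 120 * 2 >= 200, so mem_cgroup_swap_full() returns true and callers can
 * use that to prefer freeing the page's swap slot along with the page.
 */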
5854
5855	/* for remembering the boot option */
5856 #ifdef CONFIG_MEMCG_SWAP_ENABLED
5857 static int really_do_swap_account __initdata = 1;
5858 #else
5859 static int really_do_swap_account __initdata;
5860 #endif
5861
5862 static int __init enable_swap_account(char *s)
5863 {
5864 if (!strcmp(s, "1"))
5865 really_do_swap_account = 1;
5866 else if (!strcmp(s, "0"))
5867 really_do_swap_account = 0;
5868 return 1;
5869 }
5870 __setup("swapaccount=", enable_swap_account);
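
/*
 * Illustrative usage: booting with "swapaccount=0" on the kernel command
 * line clears really_do_swap_account, so mem_cgroup_swap_init() below
 * leaves do_swap_account off; "swapaccount=1" forces it on even when the
 * kernel was built without CONFIG_MEMCG_SWAP_ENABLED.
 */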
5871
5872 static u64 swap_current_read(struct cgroup_subsys_state *css,
5873 struct cftype *cft)
5874 {
5875 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5876
5877 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5878 }
5879
5880 static int swap_max_show(struct seq_file *m, void *v)
5881 {
5882 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5883 unsigned long max = READ_ONCE(memcg->swap.limit);
5884
5885 if (max == PAGE_COUNTER_MAX)
5886 seq_puts(m, "max\n");
5887 else
5888 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5889
5890 return 0;
5891 }
5892
5893 static ssize_t swap_max_write(struct kernfs_open_file *of,
5894 char *buf, size_t nbytes, loff_t off)
5895 {
5896 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5897 unsigned long max;
5898 int err;
5899
5900 buf = strstrip(buf);
5901 err = page_counter_memparse(buf, "max", &max);
5902 if (err)
5903 return err;
5904
5905 mutex_lock(&memcg_limit_mutex);
5906 err = page_counter_limit(&memcg->swap, max);
5907 mutex_unlock(&memcg_limit_mutex);
5908 if (err)
5909 return err;
5910
5911 return nbytes;
5912 }
5913
5914 static struct cftype swap_files[] = {
5915 {
5916 .name = "swap.current",
5917 .flags = CFTYPE_NOT_ON_ROOT,
5918 .read_u64 = swap_current_read,
5919 },
5920 {
5921 .name = "swap.max",
5922 .flags = CFTYPE_NOT_ON_ROOT,
5923 .seq_show = swap_max_show,
5924 .write = swap_max_write,
5925 },
5926 { } /* terminate */
5927 };
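
/*
 * Illustrative note (not part of this file): on the unified hierarchy the
 * cftypes above show up as "memory.swap.current" and "memory.swap.max".
 * Writing e.g. "1073741824" to memory.swap.max is handled by
 * swap_max_write() and caps the group at 1 GiB of swap; writing "max"
 * restores the default PAGE_COUNTER_MAX limit.
 */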
5928
5929 static struct cftype memsw_cgroup_files[] = {
5930 {
5931 .name = "memsw.usage_in_bytes",
5932 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
5933 .read_u64 = mem_cgroup_read_u64,
5934 },
5935 {
5936 .name = "memsw.max_usage_in_bytes",
5937 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
5938 .write = mem_cgroup_reset,
5939 .read_u64 = mem_cgroup_read_u64,
5940 },
5941 {
5942 .name = "memsw.limit_in_bytes",
5943 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
5944 .write = mem_cgroup_write,
5945 .read_u64 = mem_cgroup_read_u64,
5946 },
5947 {
5948 .name = "memsw.failcnt",
5949 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
5950 .write = mem_cgroup_reset,
5951 .read_u64 = mem_cgroup_read_u64,
5952 },
5953 { }, /* terminate */
5954 };
5955
5956 static int __init mem_cgroup_swap_init(void)
5957 {
5958 if (!mem_cgroup_disabled() && really_do_swap_account) {
5959 do_swap_account = 1;
5960 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
5961 swap_files));
5962 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
5963 memsw_cgroup_files));
5964 }
5965 return 0;
5966 }
5967 subsys_initcall(mem_cgroup_swap_init);
5968
5969 #endif /* CONFIG_MEMCG_SWAP */