mm: memcontrol: do not uncharge old page in page cache replacement
[deliverable/linux.git] / mm / memcontrol.c
1 /* memcontrol.c - Memory Controller
2 *
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5 *
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
8 *
9 * Memory thresholds
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
12 *
13 * Kernel Memory Controller
14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
15 * Authors: Glauber Costa and Suleiman Souhlal
16 *
17 * Native page reclaim
18 * Charge lifetime sanitation
19 * Lockless page tracking & accounting
20 * Unified hierarchy configuration model
21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
22 *
23 * This program is free software; you can redistribute it and/or modify
24 * it under the terms of the GNU General Public License as published by
25 * the Free Software Foundation; either version 2 of the License, or
26 * (at your option) any later version.
27 *
28 * This program is distributed in the hope that it will be useful,
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 * GNU General Public License for more details.
32 */
33
34 #include <linux/page_counter.h>
35 #include <linux/memcontrol.h>
36 #include <linux/cgroup.h>
37 #include <linux/mm.h>
38 #include <linux/hugetlb.h>
39 #include <linux/pagemap.h>
40 #include <linux/smp.h>
41 #include <linux/page-flags.h>
42 #include <linux/backing-dev.h>
43 #include <linux/bit_spinlock.h>
44 #include <linux/rcupdate.h>
45 #include <linux/limits.h>
46 #include <linux/export.h>
47 #include <linux/mutex.h>
48 #include <linux/rbtree.h>
49 #include <linux/slab.h>
50 #include <linux/swap.h>
51 #include <linux/swapops.h>
52 #include <linux/spinlock.h>
53 #include <linux/eventfd.h>
54 #include <linux/poll.h>
55 #include <linux/sort.h>
56 #include <linux/fs.h>
57 #include <linux/seq_file.h>
58 #include <linux/vmpressure.h>
59 #include <linux/mm_inline.h>
60 #include <linux/swap_cgroup.h>
61 #include <linux/cpu.h>
62 #include <linux/oom.h>
63 #include <linux/lockdep.h>
64 #include <linux/file.h>
65 #include <linux/tracehook.h>
66 #include "internal.h"
67 #include <net/sock.h>
68 #include <net/ip.h>
69 #include "slab.h"
70
71 #include <asm/uaccess.h>
72
73 #include <trace/events/vmscan.h>
74
75 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
76 EXPORT_SYMBOL(memory_cgrp_subsys);
77
78 struct mem_cgroup *root_mem_cgroup __read_mostly;
79
80 #define MEM_CGROUP_RECLAIM_RETRIES 5
81
82 /* Socket memory accounting disabled? */
83 static bool cgroup_memory_nosocket;
84
85 /* Kernel memory accounting disabled? */
86 static bool cgroup_memory_nokmem;
87
88 /* Whether the swap controller is active */
89 #ifdef CONFIG_MEMCG_SWAP
90 int do_swap_account __read_mostly;
91 #else
92 #define do_swap_account 0
93 #endif
94
95 /* Whether legacy memory+swap accounting is active */
96 static bool do_memsw_account(void)
97 {
98 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
99 }
100
101 static const char * const mem_cgroup_stat_names[] = {
102 "cache",
103 "rss",
104 "rss_huge",
105 "mapped_file",
106 "dirty",
107 "writeback",
108 "swap",
109 };
110
111 static const char * const mem_cgroup_events_names[] = {
112 "pgpgin",
113 "pgpgout",
114 "pgfault",
115 "pgmajfault",
116 };
117
118 static const char * const mem_cgroup_lru_names[] = {
119 "inactive_anon",
120 "active_anon",
121 "inactive_file",
122 "active_file",
123 "unevictable",
124 };
125
126 #define THRESHOLDS_EVENTS_TARGET 128
127 #define SOFTLIMIT_EVENTS_TARGET 1024
128 #define NUMAINFO_EVENTS_TARGET 1024
129
130 /*
131 * Cgroups above their limits are maintained in a RB-Tree, independent of
132 * their hierarchy representation
133 */
134
135 struct mem_cgroup_tree_per_zone {
136 struct rb_root rb_root;
137 spinlock_t lock;
138 };
139
140 struct mem_cgroup_tree_per_node {
141 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
142 };
143
144 struct mem_cgroup_tree {
145 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
146 };
147
148 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
149
150 /* for OOM */
151 struct mem_cgroup_eventfd_list {
152 struct list_head list;
153 struct eventfd_ctx *eventfd;
154 };
155
156 /*
157 * cgroup_event represents events which userspace want to receive.
158 */
159 struct mem_cgroup_event {
160 /*
161 * memcg which the event belongs to.
162 */
163 struct mem_cgroup *memcg;
164 /*
165 * eventfd to signal userspace about the event.
166 */
167 struct eventfd_ctx *eventfd;
168 /*
169 * Each of these stored in a list by the cgroup.
170 */
171 struct list_head list;
172 /*
173 * register_event() callback will be used to add new userspace
174 * waiter for changes related to this event. Use eventfd_signal()
175 * on eventfd to send notification to userspace.
176 */
177 int (*register_event)(struct mem_cgroup *memcg,
178 struct eventfd_ctx *eventfd, const char *args);
179 /*
180 * unregister_event() callback will be called when userspace closes
181 * the eventfd or on cgroup removing. This callback must be set,
182 * if you want provide notification functionality.
183 */
184 void (*unregister_event)(struct mem_cgroup *memcg,
185 struct eventfd_ctx *eventfd);
186 /*
187 * All fields below needed to unregister event when
188 * userspace closes eventfd.
189 */
190 poll_table pt;
191 wait_queue_head_t *wqh;
192 wait_queue_t wait;
193 struct work_struct remove;
194 };
195
196 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
197 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
198
199 /* Stuffs for move charges at task migration. */
200 /*
201 * Types of charges to be moved.
202 */
203 #define MOVE_ANON 0x1U
204 #define MOVE_FILE 0x2U
205 #define MOVE_MASK (MOVE_ANON | MOVE_FILE)
206
207 /* "mc" and its members are protected by cgroup_mutex */
208 static struct move_charge_struct {
209 spinlock_t lock; /* for from, to */
210 struct mem_cgroup *from;
211 struct mem_cgroup *to;
212 unsigned long flags;
213 unsigned long precharge;
214 unsigned long moved_charge;
215 unsigned long moved_swap;
216 struct task_struct *moving_task; /* a task moving charges */
217 wait_queue_head_t waitq; /* a waitq for other context */
218 } mc = {
219 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
220 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
221 };
222
223 /*
224 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
225 * limit reclaim to prevent infinite loops, if they ever occur.
226 */
227 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
228 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
229
230 enum charge_type {
231 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
232 MEM_CGROUP_CHARGE_TYPE_ANON,
233 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
234 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
235 NR_CHARGE_TYPE,
236 };
237
238 /* for encoding cft->private value on file */
239 enum res_type {
240 _MEM,
241 _MEMSWAP,
242 _OOM_TYPE,
243 _KMEM,
244 _TCP,
245 };
246
247 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
248 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
249 #define MEMFILE_ATTR(val) ((val) & 0xffff)
250 /* Used for OOM nofiier */
251 #define OOM_CONTROL (0)
252
253 /* Some nice accessors for the vmpressure. */
254 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
255 {
256 if (!memcg)
257 memcg = root_mem_cgroup;
258 return &memcg->vmpressure;
259 }
260
261 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
262 {
263 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
264 }
265
266 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
267 {
268 return (memcg == root_mem_cgroup);
269 }
270
271 /*
272 * We restrict the id in the range of [1, 65535], so it can fit into
273 * an unsigned short.
274 */
275 #define MEM_CGROUP_ID_MAX USHRT_MAX
276
277 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
278 {
279 return memcg->css.id;
280 }
281
282 /*
283 * A helper function to get mem_cgroup from ID. must be called under
284 * rcu_read_lock(). The caller is responsible for calling
285 * css_tryget_online() if the mem_cgroup is used for charging. (dropping
286 * refcnt from swap can be called against removed memcg.)
287 */
288 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
289 {
290 struct cgroup_subsys_state *css;
291
292 css = css_from_id(id, &memory_cgrp_subsys);
293 return mem_cgroup_from_css(css);
294 }
295
296 #ifndef CONFIG_SLOB
297 /*
298 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
299 * The main reason for not using cgroup id for this:
300 * this works better in sparse environments, where we have a lot of memcgs,
301 * but only a few kmem-limited. Or also, if we have, for instance, 200
302 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
303 * 200 entry array for that.
304 *
305 * The current size of the caches array is stored in memcg_nr_cache_ids. It
306 * will double each time we have to increase it.
307 */
308 static DEFINE_IDA(memcg_cache_ida);
309 int memcg_nr_cache_ids;
310
311 /* Protects memcg_nr_cache_ids */
312 static DECLARE_RWSEM(memcg_cache_ids_sem);
313
314 void memcg_get_cache_ids(void)
315 {
316 down_read(&memcg_cache_ids_sem);
317 }
318
319 void memcg_put_cache_ids(void)
320 {
321 up_read(&memcg_cache_ids_sem);
322 }
323
324 /*
325 * MIN_SIZE is different than 1, because we would like to avoid going through
326 * the alloc/free process all the time. In a small machine, 4 kmem-limited
327 * cgroups is a reasonable guess. In the future, it could be a parameter or
328 * tunable, but that is strictly not necessary.
329 *
330 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
331 * this constant directly from cgroup, but it is understandable that this is
332 * better kept as an internal representation in cgroup.c. In any case, the
333 * cgrp_id space is not getting any smaller, and we don't have to necessarily
334 * increase ours as well if it increases.
335 */
336 #define MEMCG_CACHES_MIN_SIZE 4
337 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
338
339 /*
340 * A lot of the calls to the cache allocation functions are expected to be
341 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
342 * conditional to this static branch, we'll have to allow modules that does
343 * kmem_cache_alloc and the such to see this symbol as well
344 */
345 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
346 EXPORT_SYMBOL(memcg_kmem_enabled_key);
347
348 #endif /* !CONFIG_SLOB */
349
350 static struct mem_cgroup_per_zone *
351 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
352 {
353 int nid = zone_to_nid(zone);
354 int zid = zone_idx(zone);
355
356 return &memcg->nodeinfo[nid]->zoneinfo[zid];
357 }
358
359 /**
360 * mem_cgroup_css_from_page - css of the memcg associated with a page
361 * @page: page of interest
362 *
363 * If memcg is bound to the default hierarchy, css of the memcg associated
364 * with @page is returned. The returned css remains associated with @page
365 * until it is released.
366 *
367 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
368 * is returned.
369 */
370 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
371 {
372 struct mem_cgroup *memcg;
373
374 memcg = page->mem_cgroup;
375
376 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
377 memcg = root_mem_cgroup;
378
379 return &memcg->css;
380 }
381
382 /**
383 * page_cgroup_ino - return inode number of the memcg a page is charged to
384 * @page: the page
385 *
386 * Look up the closest online ancestor of the memory cgroup @page is charged to
387 * and return its inode number or 0 if @page is not charged to any cgroup. It
388 * is safe to call this function without holding a reference to @page.
389 *
390 * Note, this function is inherently racy, because there is nothing to prevent
391 * the cgroup inode from getting torn down and potentially reallocated a moment
392 * after page_cgroup_ino() returns, so it only should be used by callers that
393 * do not care (such as procfs interfaces).
394 */
395 ino_t page_cgroup_ino(struct page *page)
396 {
397 struct mem_cgroup *memcg;
398 unsigned long ino = 0;
399
400 rcu_read_lock();
401 memcg = READ_ONCE(page->mem_cgroup);
402 while (memcg && !(memcg->css.flags & CSS_ONLINE))
403 memcg = parent_mem_cgroup(memcg);
404 if (memcg)
405 ino = cgroup_ino(memcg->css.cgroup);
406 rcu_read_unlock();
407 return ino;
408 }
409
410 static struct mem_cgroup_per_zone *
411 mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
412 {
413 int nid = page_to_nid(page);
414 int zid = page_zonenum(page);
415
416 return &memcg->nodeinfo[nid]->zoneinfo[zid];
417 }
418
419 static struct mem_cgroup_tree_per_zone *
420 soft_limit_tree_node_zone(int nid, int zid)
421 {
422 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
423 }
424
425 static struct mem_cgroup_tree_per_zone *
426 soft_limit_tree_from_page(struct page *page)
427 {
428 int nid = page_to_nid(page);
429 int zid = page_zonenum(page);
430
431 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
432 }
433
434 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
435 struct mem_cgroup_tree_per_zone *mctz,
436 unsigned long new_usage_in_excess)
437 {
438 struct rb_node **p = &mctz->rb_root.rb_node;
439 struct rb_node *parent = NULL;
440 struct mem_cgroup_per_zone *mz_node;
441
442 if (mz->on_tree)
443 return;
444
445 mz->usage_in_excess = new_usage_in_excess;
446 if (!mz->usage_in_excess)
447 return;
448 while (*p) {
449 parent = *p;
450 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
451 tree_node);
452 if (mz->usage_in_excess < mz_node->usage_in_excess)
453 p = &(*p)->rb_left;
454 /*
455 * We can't avoid mem cgroups that are over their soft
456 * limit by the same amount
457 */
458 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
459 p = &(*p)->rb_right;
460 }
461 rb_link_node(&mz->tree_node, parent, p);
462 rb_insert_color(&mz->tree_node, &mctz->rb_root);
463 mz->on_tree = true;
464 }
465
466 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
467 struct mem_cgroup_tree_per_zone *mctz)
468 {
469 if (!mz->on_tree)
470 return;
471 rb_erase(&mz->tree_node, &mctz->rb_root);
472 mz->on_tree = false;
473 }
474
475 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
476 struct mem_cgroup_tree_per_zone *mctz)
477 {
478 unsigned long flags;
479
480 spin_lock_irqsave(&mctz->lock, flags);
481 __mem_cgroup_remove_exceeded(mz, mctz);
482 spin_unlock_irqrestore(&mctz->lock, flags);
483 }
484
485 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
486 {
487 unsigned long nr_pages = page_counter_read(&memcg->memory);
488 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
489 unsigned long excess = 0;
490
491 if (nr_pages > soft_limit)
492 excess = nr_pages - soft_limit;
493
494 return excess;
495 }
496
497 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
498 {
499 unsigned long excess;
500 struct mem_cgroup_per_zone *mz;
501 struct mem_cgroup_tree_per_zone *mctz;
502
503 mctz = soft_limit_tree_from_page(page);
504 /*
505 * Necessary to update all ancestors when hierarchy is used.
506 * because their event counter is not touched.
507 */
508 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
509 mz = mem_cgroup_page_zoneinfo(memcg, page);
510 excess = soft_limit_excess(memcg);
511 /*
512 * We have to update the tree if mz is on RB-tree or
513 * mem is over its softlimit.
514 */
515 if (excess || mz->on_tree) {
516 unsigned long flags;
517
518 spin_lock_irqsave(&mctz->lock, flags);
519 /* if on-tree, remove it */
520 if (mz->on_tree)
521 __mem_cgroup_remove_exceeded(mz, mctz);
522 /*
523 * Insert again. mz->usage_in_excess will be updated.
524 * If excess is 0, no tree ops.
525 */
526 __mem_cgroup_insert_exceeded(mz, mctz, excess);
527 spin_unlock_irqrestore(&mctz->lock, flags);
528 }
529 }
530 }
531
532 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
533 {
534 struct mem_cgroup_tree_per_zone *mctz;
535 struct mem_cgroup_per_zone *mz;
536 int nid, zid;
537
538 for_each_node(nid) {
539 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
540 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
541 mctz = soft_limit_tree_node_zone(nid, zid);
542 mem_cgroup_remove_exceeded(mz, mctz);
543 }
544 }
545 }
546
547 static struct mem_cgroup_per_zone *
548 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
549 {
550 struct rb_node *rightmost = NULL;
551 struct mem_cgroup_per_zone *mz;
552
553 retry:
554 mz = NULL;
555 rightmost = rb_last(&mctz->rb_root);
556 if (!rightmost)
557 goto done; /* Nothing to reclaim from */
558
559 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
560 /*
561 * Remove the node now but someone else can add it back,
562 * we will to add it back at the end of reclaim to its correct
563 * position in the tree.
564 */
565 __mem_cgroup_remove_exceeded(mz, mctz);
566 if (!soft_limit_excess(mz->memcg) ||
567 !css_tryget_online(&mz->memcg->css))
568 goto retry;
569 done:
570 return mz;
571 }
572
573 static struct mem_cgroup_per_zone *
574 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
575 {
576 struct mem_cgroup_per_zone *mz;
577
578 spin_lock_irq(&mctz->lock);
579 mz = __mem_cgroup_largest_soft_limit_node(mctz);
580 spin_unlock_irq(&mctz->lock);
581 return mz;
582 }
583
584 /*
585 * Return page count for single (non recursive) @memcg.
586 *
587 * Implementation Note: reading percpu statistics for memcg.
588 *
589 * Both of vmstat[] and percpu_counter has threshold and do periodic
590 * synchronization to implement "quick" read. There are trade-off between
591 * reading cost and precision of value. Then, we may have a chance to implement
592 * a periodic synchronization of counter in memcg's counter.
593 *
594 * But this _read() function is used for user interface now. The user accounts
595 * memory usage by memory cgroup and he _always_ requires exact value because
596 * he accounts memory. Even if we provide quick-and-fuzzy read, we always
597 * have to visit all online cpus and make sum. So, for now, unnecessary
598 * synchronization is not implemented. (just implemented for cpu hotplug)
599 *
600 * If there are kernel internal actions which can make use of some not-exact
601 * value, and reading all cpu value can be performance bottleneck in some
602 * common workload, threshold and synchronization as vmstat[] should be
603 * implemented.
604 */
605 static unsigned long
606 mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
607 {
608 long val = 0;
609 int cpu;
610
611 /* Per-cpu values can be negative, use a signed accumulator */
612 for_each_possible_cpu(cpu)
613 val += per_cpu(memcg->stat->count[idx], cpu);
614 /*
615 * Summing races with updates, so val may be negative. Avoid exposing
616 * transient negative values.
617 */
618 if (val < 0)
619 val = 0;
620 return val;
621 }
622
623 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
624 enum mem_cgroup_events_index idx)
625 {
626 unsigned long val = 0;
627 int cpu;
628
629 for_each_possible_cpu(cpu)
630 val += per_cpu(memcg->stat->events[idx], cpu);
631 return val;
632 }
633
634 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
635 struct page *page,
636 bool compound, int nr_pages)
637 {
638 /*
639 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
640 * counted as CACHE even if it's on ANON LRU.
641 */
642 if (PageAnon(page))
643 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
644 nr_pages);
645 else
646 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
647 nr_pages);
648
649 if (compound) {
650 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
651 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
652 nr_pages);
653 }
654
655 /* pagein of a big page is an event. So, ignore page size */
656 if (nr_pages > 0)
657 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
658 else {
659 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
660 nr_pages = -nr_pages; /* for event */
661 }
662
663 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
664 }
665
666 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
667 int nid,
668 unsigned int lru_mask)
669 {
670 unsigned long nr = 0;
671 int zid;
672
673 VM_BUG_ON((unsigned)nid >= nr_node_ids);
674
675 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
676 struct mem_cgroup_per_zone *mz;
677 enum lru_list lru;
678
679 for_each_lru(lru) {
680 if (!(BIT(lru) & lru_mask))
681 continue;
682 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
683 nr += mz->lru_size[lru];
684 }
685 }
686 return nr;
687 }
688
689 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
690 unsigned int lru_mask)
691 {
692 unsigned long nr = 0;
693 int nid;
694
695 for_each_node_state(nid, N_MEMORY)
696 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
697 return nr;
698 }
699
700 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
701 enum mem_cgroup_events_target target)
702 {
703 unsigned long val, next;
704
705 val = __this_cpu_read(memcg->stat->nr_page_events);
706 next = __this_cpu_read(memcg->stat->targets[target]);
707 /* from time_after() in jiffies.h */
708 if ((long)next - (long)val < 0) {
709 switch (target) {
710 case MEM_CGROUP_TARGET_THRESH:
711 next = val + THRESHOLDS_EVENTS_TARGET;
712 break;
713 case MEM_CGROUP_TARGET_SOFTLIMIT:
714 next = val + SOFTLIMIT_EVENTS_TARGET;
715 break;
716 case MEM_CGROUP_TARGET_NUMAINFO:
717 next = val + NUMAINFO_EVENTS_TARGET;
718 break;
719 default:
720 break;
721 }
722 __this_cpu_write(memcg->stat->targets[target], next);
723 return true;
724 }
725 return false;
726 }
727
728 /*
729 * Check events in order.
730 *
731 */
732 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
733 {
734 /* threshold event is triggered in finer grain than soft limit */
735 if (unlikely(mem_cgroup_event_ratelimit(memcg,
736 MEM_CGROUP_TARGET_THRESH))) {
737 bool do_softlimit;
738 bool do_numainfo __maybe_unused;
739
740 do_softlimit = mem_cgroup_event_ratelimit(memcg,
741 MEM_CGROUP_TARGET_SOFTLIMIT);
742 #if MAX_NUMNODES > 1
743 do_numainfo = mem_cgroup_event_ratelimit(memcg,
744 MEM_CGROUP_TARGET_NUMAINFO);
745 #endif
746 mem_cgroup_threshold(memcg);
747 if (unlikely(do_softlimit))
748 mem_cgroup_update_tree(memcg, page);
749 #if MAX_NUMNODES > 1
750 if (unlikely(do_numainfo))
751 atomic_inc(&memcg->numainfo_events);
752 #endif
753 }
754 }
755
756 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
757 {
758 /*
759 * mm_update_next_owner() may clear mm->owner to NULL
760 * if it races with swapoff, page migration, etc.
761 * So this can be called with p == NULL.
762 */
763 if (unlikely(!p))
764 return NULL;
765
766 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
767 }
768 EXPORT_SYMBOL(mem_cgroup_from_task);
769
770 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
771 {
772 struct mem_cgroup *memcg = NULL;
773
774 rcu_read_lock();
775 do {
776 /*
777 * Page cache insertions can happen withou an
778 * actual mm context, e.g. during disk probing
779 * on boot, loopback IO, acct() writes etc.
780 */
781 if (unlikely(!mm))
782 memcg = root_mem_cgroup;
783 else {
784 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
785 if (unlikely(!memcg))
786 memcg = root_mem_cgroup;
787 }
788 } while (!css_tryget_online(&memcg->css));
789 rcu_read_unlock();
790 return memcg;
791 }
792
793 /**
794 * mem_cgroup_iter - iterate over memory cgroup hierarchy
795 * @root: hierarchy root
796 * @prev: previously returned memcg, NULL on first invocation
797 * @reclaim: cookie for shared reclaim walks, NULL for full walks
798 *
799 * Returns references to children of the hierarchy below @root, or
800 * @root itself, or %NULL after a full round-trip.
801 *
802 * Caller must pass the return value in @prev on subsequent
803 * invocations for reference counting, or use mem_cgroup_iter_break()
804 * to cancel a hierarchy walk before the round-trip is complete.
805 *
806 * Reclaimers can specify a zone and a priority level in @reclaim to
807 * divide up the memcgs in the hierarchy among all concurrent
808 * reclaimers operating on the same zone and priority.
809 */
810 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
811 struct mem_cgroup *prev,
812 struct mem_cgroup_reclaim_cookie *reclaim)
813 {
814 struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
815 struct cgroup_subsys_state *css = NULL;
816 struct mem_cgroup *memcg = NULL;
817 struct mem_cgroup *pos = NULL;
818
819 if (mem_cgroup_disabled())
820 return NULL;
821
822 if (!root)
823 root = root_mem_cgroup;
824
825 if (prev && !reclaim)
826 pos = prev;
827
828 if (!root->use_hierarchy && root != root_mem_cgroup) {
829 if (prev)
830 goto out;
831 return root;
832 }
833
834 rcu_read_lock();
835
836 if (reclaim) {
837 struct mem_cgroup_per_zone *mz;
838
839 mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
840 iter = &mz->iter[reclaim->priority];
841
842 if (prev && reclaim->generation != iter->generation)
843 goto out_unlock;
844
845 while (1) {
846 pos = READ_ONCE(iter->position);
847 if (!pos || css_tryget(&pos->css))
848 break;
849 /*
850 * css reference reached zero, so iter->position will
851 * be cleared by ->css_released. However, we should not
852 * rely on this happening soon, because ->css_released
853 * is called from a work queue, and by busy-waiting we
854 * might block it. So we clear iter->position right
855 * away.
856 */
857 (void)cmpxchg(&iter->position, pos, NULL);
858 }
859 }
860
861 if (pos)
862 css = &pos->css;
863
864 for (;;) {
865 css = css_next_descendant_pre(css, &root->css);
866 if (!css) {
867 /*
868 * Reclaimers share the hierarchy walk, and a
869 * new one might jump in right at the end of
870 * the hierarchy - make sure they see at least
871 * one group and restart from the beginning.
872 */
873 if (!prev)
874 continue;
875 break;
876 }
877
878 /*
879 * Verify the css and acquire a reference. The root
880 * is provided by the caller, so we know it's alive
881 * and kicking, and don't take an extra reference.
882 */
883 memcg = mem_cgroup_from_css(css);
884
885 if (css == &root->css)
886 break;
887
888 if (css_tryget(css))
889 break;
890
891 memcg = NULL;
892 }
893
894 if (reclaim) {
895 /*
896 * The position could have already been updated by a competing
897 * thread, so check that the value hasn't changed since we read
898 * it to avoid reclaiming from the same cgroup twice.
899 */
900 (void)cmpxchg(&iter->position, pos, memcg);
901
902 if (pos)
903 css_put(&pos->css);
904
905 if (!memcg)
906 iter->generation++;
907 else if (!prev)
908 reclaim->generation = iter->generation;
909 }
910
911 out_unlock:
912 rcu_read_unlock();
913 out:
914 if (prev && prev != root)
915 css_put(&prev->css);
916
917 return memcg;
918 }
919
920 /**
921 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
922 * @root: hierarchy root
923 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
924 */
925 void mem_cgroup_iter_break(struct mem_cgroup *root,
926 struct mem_cgroup *prev)
927 {
928 if (!root)
929 root = root_mem_cgroup;
930 if (prev && prev != root)
931 css_put(&prev->css);
932 }
933
934 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
935 {
936 struct mem_cgroup *memcg = dead_memcg;
937 struct mem_cgroup_reclaim_iter *iter;
938 struct mem_cgroup_per_zone *mz;
939 int nid, zid;
940 int i;
941
942 while ((memcg = parent_mem_cgroup(memcg))) {
943 for_each_node(nid) {
944 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
945 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
946 for (i = 0; i <= DEF_PRIORITY; i++) {
947 iter = &mz->iter[i];
948 cmpxchg(&iter->position,
949 dead_memcg, NULL);
950 }
951 }
952 }
953 }
954 }
955
956 /*
957 * Iteration constructs for visiting all cgroups (under a tree). If
958 * loops are exited prematurely (break), mem_cgroup_iter_break() must
959 * be used for reference counting.
960 */
961 #define for_each_mem_cgroup_tree(iter, root) \
962 for (iter = mem_cgroup_iter(root, NULL, NULL); \
963 iter != NULL; \
964 iter = mem_cgroup_iter(root, iter, NULL))
965
966 #define for_each_mem_cgroup(iter) \
967 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
968 iter != NULL; \
969 iter = mem_cgroup_iter(NULL, iter, NULL))
970
971 /**
972 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
973 * @zone: zone of the wanted lruvec
974 * @memcg: memcg of the wanted lruvec
975 *
976 * Returns the lru list vector holding pages for the given @zone and
977 * @mem. This can be the global zone lruvec, if the memory controller
978 * is disabled.
979 */
980 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
981 struct mem_cgroup *memcg)
982 {
983 struct mem_cgroup_per_zone *mz;
984 struct lruvec *lruvec;
985
986 if (mem_cgroup_disabled()) {
987 lruvec = &zone->lruvec;
988 goto out;
989 }
990
991 mz = mem_cgroup_zone_zoneinfo(memcg, zone);
992 lruvec = &mz->lruvec;
993 out:
994 /*
995 * Since a node can be onlined after the mem_cgroup was created,
996 * we have to be prepared to initialize lruvec->zone here;
997 * and if offlined then reonlined, we need to reinitialize it.
998 */
999 if (unlikely(lruvec->zone != zone))
1000 lruvec->zone = zone;
1001 return lruvec;
1002 }
1003
1004 /**
1005 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1006 * @page: the page
1007 * @zone: zone of the page
1008 *
1009 * This function is only safe when following the LRU page isolation
1010 * and putback protocol: the LRU lock must be held, and the page must
1011 * either be PageLRU() or the caller must have isolated/allocated it.
1012 */
1013 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
1014 {
1015 struct mem_cgroup_per_zone *mz;
1016 struct mem_cgroup *memcg;
1017 struct lruvec *lruvec;
1018
1019 if (mem_cgroup_disabled()) {
1020 lruvec = &zone->lruvec;
1021 goto out;
1022 }
1023
1024 memcg = page->mem_cgroup;
1025 /*
1026 * Swapcache readahead pages are added to the LRU - and
1027 * possibly migrated - before they are charged.
1028 */
1029 if (!memcg)
1030 memcg = root_mem_cgroup;
1031
1032 mz = mem_cgroup_page_zoneinfo(memcg, page);
1033 lruvec = &mz->lruvec;
1034 out:
1035 /*
1036 * Since a node can be onlined after the mem_cgroup was created,
1037 * we have to be prepared to initialize lruvec->zone here;
1038 * and if offlined then reonlined, we need to reinitialize it.
1039 */
1040 if (unlikely(lruvec->zone != zone))
1041 lruvec->zone = zone;
1042 return lruvec;
1043 }
1044
1045 /**
1046 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1047 * @lruvec: mem_cgroup per zone lru vector
1048 * @lru: index of lru list the page is sitting on
1049 * @nr_pages: positive when adding or negative when removing
1050 *
1051 * This function must be called when a page is added to or removed from an
1052 * lru list.
1053 */
1054 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1055 int nr_pages)
1056 {
1057 struct mem_cgroup_per_zone *mz;
1058 unsigned long *lru_size;
1059
1060 if (mem_cgroup_disabled())
1061 return;
1062
1063 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1064 lru_size = mz->lru_size + lru;
1065 *lru_size += nr_pages;
1066 VM_BUG_ON((long)(*lru_size) < 0);
1067 }
1068
1069 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
1070 {
1071 struct mem_cgroup *task_memcg;
1072 struct task_struct *p;
1073 bool ret;
1074
1075 p = find_lock_task_mm(task);
1076 if (p) {
1077 task_memcg = get_mem_cgroup_from_mm(p->mm);
1078 task_unlock(p);
1079 } else {
1080 /*
1081 * All threads may have already detached their mm's, but the oom
1082 * killer still needs to detect if they have already been oom
1083 * killed to prevent needlessly killing additional tasks.
1084 */
1085 rcu_read_lock();
1086 task_memcg = mem_cgroup_from_task(task);
1087 css_get(&task_memcg->css);
1088 rcu_read_unlock();
1089 }
1090 ret = mem_cgroup_is_descendant(task_memcg, memcg);
1091 css_put(&task_memcg->css);
1092 return ret;
1093 }
1094
1095 /**
1096 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1097 * @memcg: the memory cgroup
1098 *
1099 * Returns the maximum amount of memory @mem can be charged with, in
1100 * pages.
1101 */
1102 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1103 {
1104 unsigned long margin = 0;
1105 unsigned long count;
1106 unsigned long limit;
1107
1108 count = page_counter_read(&memcg->memory);
1109 limit = READ_ONCE(memcg->memory.limit);
1110 if (count < limit)
1111 margin = limit - count;
1112
1113 if (do_memsw_account()) {
1114 count = page_counter_read(&memcg->memsw);
1115 limit = READ_ONCE(memcg->memsw.limit);
1116 if (count <= limit)
1117 margin = min(margin, limit - count);
1118 }
1119
1120 return margin;
1121 }
1122
1123 /*
1124 * A routine for checking "mem" is under move_account() or not.
1125 *
1126 * Checking a cgroup is mc.from or mc.to or under hierarchy of
1127 * moving cgroups. This is for waiting at high-memory pressure
1128 * caused by "move".
1129 */
1130 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1131 {
1132 struct mem_cgroup *from;
1133 struct mem_cgroup *to;
1134 bool ret = false;
1135 /*
1136 * Unlike task_move routines, we access mc.to, mc.from not under
1137 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1138 */
1139 spin_lock(&mc.lock);
1140 from = mc.from;
1141 to = mc.to;
1142 if (!from)
1143 goto unlock;
1144
1145 ret = mem_cgroup_is_descendant(from, memcg) ||
1146 mem_cgroup_is_descendant(to, memcg);
1147 unlock:
1148 spin_unlock(&mc.lock);
1149 return ret;
1150 }
1151
1152 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1153 {
1154 if (mc.moving_task && current != mc.moving_task) {
1155 if (mem_cgroup_under_move(memcg)) {
1156 DEFINE_WAIT(wait);
1157 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1158 /* moving charge context might have finished. */
1159 if (mc.moving_task)
1160 schedule();
1161 finish_wait(&mc.waitq, &wait);
1162 return true;
1163 }
1164 }
1165 return false;
1166 }
1167
1168 #define K(x) ((x) << (PAGE_SHIFT-10))
1169 /**
1170 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1171 * @memcg: The memory cgroup that went over limit
1172 * @p: Task that is going to be killed
1173 *
1174 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1175 * enabled
1176 */
1177 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1178 {
1179 /* oom_info_lock ensures that parallel ooms do not interleave */
1180 static DEFINE_MUTEX(oom_info_lock);
1181 struct mem_cgroup *iter;
1182 unsigned int i;
1183
1184 mutex_lock(&oom_info_lock);
1185 rcu_read_lock();
1186
1187 if (p) {
1188 pr_info("Task in ");
1189 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1190 pr_cont(" killed as a result of limit of ");
1191 } else {
1192 pr_info("Memory limit reached of cgroup ");
1193 }
1194
1195 pr_cont_cgroup_path(memcg->css.cgroup);
1196 pr_cont("\n");
1197
1198 rcu_read_unlock();
1199
1200 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1201 K((u64)page_counter_read(&memcg->memory)),
1202 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1203 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1204 K((u64)page_counter_read(&memcg->memsw)),
1205 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1206 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1207 K((u64)page_counter_read(&memcg->kmem)),
1208 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1209
1210 for_each_mem_cgroup_tree(iter, memcg) {
1211 pr_info("Memory cgroup stats for ");
1212 pr_cont_cgroup_path(iter->css.cgroup);
1213 pr_cont(":");
1214
1215 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1216 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1217 continue;
1218 pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
1219 K(mem_cgroup_read_stat(iter, i)));
1220 }
1221
1222 for (i = 0; i < NR_LRU_LISTS; i++)
1223 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1224 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1225
1226 pr_cont("\n");
1227 }
1228 mutex_unlock(&oom_info_lock);
1229 }
1230
1231 /*
1232 * This function returns the number of memcg under hierarchy tree. Returns
1233 * 1(self count) if no children.
1234 */
1235 static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1236 {
1237 int num = 0;
1238 struct mem_cgroup *iter;
1239
1240 for_each_mem_cgroup_tree(iter, memcg)
1241 num++;
1242 return num;
1243 }
1244
1245 /*
1246 * Return the memory (and swap, if configured) limit for a memcg.
1247 */
1248 static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
1249 {
1250 unsigned long limit;
1251
1252 limit = memcg->memory.limit;
1253 if (mem_cgroup_swappiness(memcg)) {
1254 unsigned long memsw_limit;
1255 unsigned long swap_limit;
1256
1257 memsw_limit = memcg->memsw.limit;
1258 swap_limit = memcg->swap.limit;
1259 swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
1260 limit = min(limit + swap_limit, memsw_limit);
1261 }
1262 return limit;
1263 }
1264
1265 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1266 int order)
1267 {
1268 struct oom_control oc = {
1269 .zonelist = NULL,
1270 .nodemask = NULL,
1271 .gfp_mask = gfp_mask,
1272 .order = order,
1273 };
1274 struct mem_cgroup *iter;
1275 unsigned long chosen_points = 0;
1276 unsigned long totalpages;
1277 unsigned int points = 0;
1278 struct task_struct *chosen = NULL;
1279
1280 mutex_lock(&oom_lock);
1281
1282 /*
1283 * If current has a pending SIGKILL or is exiting, then automatically
1284 * select it. The goal is to allow it to allocate so that it may
1285 * quickly exit and free its memory.
1286 */
1287 if (fatal_signal_pending(current) || task_will_free_mem(current)) {
1288 mark_oom_victim(current);
1289 goto unlock;
1290 }
1291
1292 check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
1293 totalpages = mem_cgroup_get_limit(memcg) ? : 1;
1294 for_each_mem_cgroup_tree(iter, memcg) {
1295 struct css_task_iter it;
1296 struct task_struct *task;
1297
1298 css_task_iter_start(&iter->css, &it);
1299 while ((task = css_task_iter_next(&it))) {
1300 switch (oom_scan_process_thread(&oc, task, totalpages)) {
1301 case OOM_SCAN_SELECT:
1302 if (chosen)
1303 put_task_struct(chosen);
1304 chosen = task;
1305 chosen_points = ULONG_MAX;
1306 get_task_struct(chosen);
1307 /* fall through */
1308 case OOM_SCAN_CONTINUE:
1309 continue;
1310 case OOM_SCAN_ABORT:
1311 css_task_iter_end(&it);
1312 mem_cgroup_iter_break(memcg, iter);
1313 if (chosen)
1314 put_task_struct(chosen);
1315 goto unlock;
1316 case OOM_SCAN_OK:
1317 break;
1318 };
1319 points = oom_badness(task, memcg, NULL, totalpages);
1320 if (!points || points < chosen_points)
1321 continue;
1322 /* Prefer thread group leaders for display purposes */
1323 if (points == chosen_points &&
1324 thread_group_leader(chosen))
1325 continue;
1326
1327 if (chosen)
1328 put_task_struct(chosen);
1329 chosen = task;
1330 chosen_points = points;
1331 get_task_struct(chosen);
1332 }
1333 css_task_iter_end(&it);
1334 }
1335
1336 if (chosen) {
1337 points = chosen_points * 1000 / totalpages;
1338 oom_kill_process(&oc, chosen, points, totalpages, memcg,
1339 "Memory cgroup out of memory");
1340 }
1341 unlock:
1342 mutex_unlock(&oom_lock);
1343 }
1344
1345 #if MAX_NUMNODES > 1
1346
1347 /**
1348 * test_mem_cgroup_node_reclaimable
1349 * @memcg: the target memcg
1350 * @nid: the node ID to be checked.
1351 * @noswap : specify true here if the user wants flle only information.
1352 *
1353 * This function returns whether the specified memcg contains any
1354 * reclaimable pages on a node. Returns true if there are any reclaimable
1355 * pages in the node.
1356 */
1357 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1358 int nid, bool noswap)
1359 {
1360 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1361 return true;
1362 if (noswap || !total_swap_pages)
1363 return false;
1364 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1365 return true;
1366 return false;
1367
1368 }
1369
1370 /*
1371 * Always updating the nodemask is not very good - even if we have an empty
1372 * list or the wrong list here, we can start from some node and traverse all
1373 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1374 *
1375 */
1376 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1377 {
1378 int nid;
1379 /*
1380 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1381 * pagein/pageout changes since the last update.
1382 */
1383 if (!atomic_read(&memcg->numainfo_events))
1384 return;
1385 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1386 return;
1387
1388 /* make a nodemask where this memcg uses memory from */
1389 memcg->scan_nodes = node_states[N_MEMORY];
1390
1391 for_each_node_mask(nid, node_states[N_MEMORY]) {
1392
1393 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1394 node_clear(nid, memcg->scan_nodes);
1395 }
1396
1397 atomic_set(&memcg->numainfo_events, 0);
1398 atomic_set(&memcg->numainfo_updating, 0);
1399 }
1400
1401 /*
1402 * Selecting a node where we start reclaim from. Because what we need is just
1403 * reducing usage counter, start from anywhere is O,K. Considering
1404 * memory reclaim from current node, there are pros. and cons.
1405 *
1406 * Freeing memory from current node means freeing memory from a node which
1407 * we'll use or we've used. So, it may make LRU bad. And if several threads
1408 * hit limits, it will see a contention on a node. But freeing from remote
1409 * node means more costs for memory reclaim because of memory latency.
1410 *
1411 * Now, we use round-robin. Better algorithm is welcomed.
1412 */
1413 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1414 {
1415 int node;
1416
1417 mem_cgroup_may_update_nodemask(memcg);
1418 node = memcg->last_scanned_node;
1419
1420 node = next_node(node, memcg->scan_nodes);
1421 if (node == MAX_NUMNODES)
1422 node = first_node(memcg->scan_nodes);
1423 /*
1424 * We call this when we hit limit, not when pages are added to LRU.
1425 * No LRU may hold pages because all pages are UNEVICTABLE or
1426 * memcg is too small and all pages are not on LRU. In that case,
1427 * we use curret node.
1428 */
1429 if (unlikely(node == MAX_NUMNODES))
1430 node = numa_node_id();
1431
1432 memcg->last_scanned_node = node;
1433 return node;
1434 }
1435 #else
1436 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1437 {
1438 return 0;
1439 }
1440 #endif
1441
1442 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1443 struct zone *zone,
1444 gfp_t gfp_mask,
1445 unsigned long *total_scanned)
1446 {
1447 struct mem_cgroup *victim = NULL;
1448 int total = 0;
1449 int loop = 0;
1450 unsigned long excess;
1451 unsigned long nr_scanned;
1452 struct mem_cgroup_reclaim_cookie reclaim = {
1453 .zone = zone,
1454 .priority = 0,
1455 };
1456
1457 excess = soft_limit_excess(root_memcg);
1458
1459 while (1) {
1460 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1461 if (!victim) {
1462 loop++;
1463 if (loop >= 2) {
1464 /*
1465 * If we have not been able to reclaim
1466 * anything, it might because there are
1467 * no reclaimable pages under this hierarchy
1468 */
1469 if (!total)
1470 break;
1471 /*
1472 * We want to do more targeted reclaim.
1473 * excess >> 2 is not to excessive so as to
1474 * reclaim too much, nor too less that we keep
1475 * coming back to reclaim from this cgroup
1476 */
1477 if (total >= (excess >> 2) ||
1478 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1479 break;
1480 }
1481 continue;
1482 }
1483 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1484 zone, &nr_scanned);
1485 *total_scanned += nr_scanned;
1486 if (!soft_limit_excess(root_memcg))
1487 break;
1488 }
1489 mem_cgroup_iter_break(root_memcg, victim);
1490 return total;
1491 }
1492
1493 #ifdef CONFIG_LOCKDEP
1494 static struct lockdep_map memcg_oom_lock_dep_map = {
1495 .name = "memcg_oom_lock",
1496 };
1497 #endif
1498
1499 static DEFINE_SPINLOCK(memcg_oom_lock);
1500
1501 /*
1502 * Check OOM-Killer is already running under our hierarchy.
1503 * If someone is running, return false.
1504 */
1505 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1506 {
1507 struct mem_cgroup *iter, *failed = NULL;
1508
1509 spin_lock(&memcg_oom_lock);
1510
1511 for_each_mem_cgroup_tree(iter, memcg) {
1512 if (iter->oom_lock) {
1513 /*
1514 * this subtree of our hierarchy is already locked
1515 * so we cannot give a lock.
1516 */
1517 failed = iter;
1518 mem_cgroup_iter_break(memcg, iter);
1519 break;
1520 } else
1521 iter->oom_lock = true;
1522 }
1523
1524 if (failed) {
1525 /*
1526 * OK, we failed to lock the whole subtree so we have
1527 * to clean up what we set up to the failing subtree
1528 */
1529 for_each_mem_cgroup_tree(iter, memcg) {
1530 if (iter == failed) {
1531 mem_cgroup_iter_break(memcg, iter);
1532 break;
1533 }
1534 iter->oom_lock = false;
1535 }
1536 } else
1537 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1538
1539 spin_unlock(&memcg_oom_lock);
1540
1541 return !failed;
1542 }
1543
1544 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1545 {
1546 struct mem_cgroup *iter;
1547
1548 spin_lock(&memcg_oom_lock);
1549 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1550 for_each_mem_cgroup_tree(iter, memcg)
1551 iter->oom_lock = false;
1552 spin_unlock(&memcg_oom_lock);
1553 }
1554
1555 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1556 {
1557 struct mem_cgroup *iter;
1558
1559 spin_lock(&memcg_oom_lock);
1560 for_each_mem_cgroup_tree(iter, memcg)
1561 iter->under_oom++;
1562 spin_unlock(&memcg_oom_lock);
1563 }
1564
1565 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1566 {
1567 struct mem_cgroup *iter;
1568
1569 /*
1570 * When a new child is created while the hierarchy is under oom,
1571 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1572 */
1573 spin_lock(&memcg_oom_lock);
1574 for_each_mem_cgroup_tree(iter, memcg)
1575 if (iter->under_oom > 0)
1576 iter->under_oom--;
1577 spin_unlock(&memcg_oom_lock);
1578 }
1579
1580 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1581
1582 struct oom_wait_info {
1583 struct mem_cgroup *memcg;
1584 wait_queue_t wait;
1585 };
1586
1587 static int memcg_oom_wake_function(wait_queue_t *wait,
1588 unsigned mode, int sync, void *arg)
1589 {
1590 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1591 struct mem_cgroup *oom_wait_memcg;
1592 struct oom_wait_info *oom_wait_info;
1593
1594 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1595 oom_wait_memcg = oom_wait_info->memcg;
1596
1597 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1598 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1599 return 0;
1600 return autoremove_wake_function(wait, mode, sync, arg);
1601 }
1602
1603 static void memcg_oom_recover(struct mem_cgroup *memcg)
1604 {
1605 /*
1606 * For the following lockless ->under_oom test, the only required
1607 * guarantee is that it must see the state asserted by an OOM when
1608 * this function is called as a result of userland actions
1609 * triggered by the notification of the OOM. This is trivially
1610 * achieved by invoking mem_cgroup_mark_under_oom() before
1611 * triggering notification.
1612 */
1613 if (memcg && memcg->under_oom)
1614 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1615 }
1616
1617 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1618 {
1619 if (!current->memcg_may_oom)
1620 return;
1621 /*
1622 * We are in the middle of the charge context here, so we
1623 * don't want to block when potentially sitting on a callstack
1624 * that holds all kinds of filesystem and mm locks.
1625 *
1626 * Also, the caller may handle a failed allocation gracefully
1627 * (like optional page cache readahead) and so an OOM killer
1628 * invocation might not even be necessary.
1629 *
1630 * That's why we don't do anything here except remember the
1631 * OOM context and then deal with it at the end of the page
1632 * fault when the stack is unwound, the locks are released,
1633 * and when we know whether the fault was overall successful.
1634 */
1635 css_get(&memcg->css);
1636 current->memcg_in_oom = memcg;
1637 current->memcg_oom_gfp_mask = mask;
1638 current->memcg_oom_order = order;
1639 }
1640
1641 /**
1642 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1643 * @handle: actually kill/wait or just clean up the OOM state
1644 *
1645 * This has to be called at the end of a page fault if the memcg OOM
1646 * handler was enabled.
1647 *
1648 * Memcg supports userspace OOM handling where failed allocations must
1649 * sleep on a waitqueue until the userspace task resolves the
1650 * situation. Sleeping directly in the charge context with all kinds
1651 * of locks held is not a good idea, instead we remember an OOM state
1652 * in the task and mem_cgroup_oom_synchronize() has to be called at
1653 * the end of the page fault to complete the OOM handling.
1654 *
1655 * Returns %true if an ongoing memcg OOM situation was detected and
1656 * completed, %false otherwise.
1657 */
1658 bool mem_cgroup_oom_synchronize(bool handle)
1659 {
1660 struct mem_cgroup *memcg = current->memcg_in_oom;
1661 struct oom_wait_info owait;
1662 bool locked;
1663
1664 /* OOM is global, do not handle */
1665 if (!memcg)
1666 return false;
1667
1668 if (!handle || oom_killer_disabled)
1669 goto cleanup;
1670
1671 owait.memcg = memcg;
1672 owait.wait.flags = 0;
1673 owait.wait.func = memcg_oom_wake_function;
1674 owait.wait.private = current;
1675 INIT_LIST_HEAD(&owait.wait.task_list);
1676
1677 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1678 mem_cgroup_mark_under_oom(memcg);
1679
1680 locked = mem_cgroup_oom_trylock(memcg);
1681
1682 if (locked)
1683 mem_cgroup_oom_notify(memcg);
1684
1685 if (locked && !memcg->oom_kill_disable) {
1686 mem_cgroup_unmark_under_oom(memcg);
1687 finish_wait(&memcg_oom_waitq, &owait.wait);
1688 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1689 current->memcg_oom_order);
1690 } else {
1691 schedule();
1692 mem_cgroup_unmark_under_oom(memcg);
1693 finish_wait(&memcg_oom_waitq, &owait.wait);
1694 }
1695
1696 if (locked) {
1697 mem_cgroup_oom_unlock(memcg);
1698 /*
1699 * There is no guarantee that an OOM-lock contender
1700 * sees the wakeups triggered by the OOM kill
1701 * uncharges. Wake any sleepers explicitely.
1702 */
1703 memcg_oom_recover(memcg);
1704 }
1705 cleanup:
1706 current->memcg_in_oom = NULL;
1707 css_put(&memcg->css);
1708 return true;
1709 }
1710
1711 /**
1712 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
1713 * @page: page that is going to change accounted state
1714 *
1715 * This function must mark the beginning of an accounted page state
1716 * change to prevent double accounting when the page is concurrently
1717 * being moved to another memcg:
1718 *
1719 * memcg = mem_cgroup_begin_page_stat(page);
1720 * if (TestClearPageState(page))
1721 * mem_cgroup_update_page_stat(memcg, state, -1);
1722 * mem_cgroup_end_page_stat(memcg);
1723 */
1724 struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
1725 {
1726 struct mem_cgroup *memcg;
1727 unsigned long flags;
1728
1729 /*
1730 * The RCU lock is held throughout the transaction. The fast
1731 * path can get away without acquiring the memcg->move_lock
1732 * because page moving starts with an RCU grace period.
1733 *
1734 * The RCU lock also protects the memcg from being freed when
1735 * the page state that is going to change is the only thing
1736 * preventing the page from being uncharged.
1737 * E.g. end-writeback clearing PageWriteback(), which allows
1738 * migration to go ahead and uncharge the page before the
1739 * account transaction might be complete.
1740 */
1741 rcu_read_lock();
1742
1743 if (mem_cgroup_disabled())
1744 return NULL;
1745 again:
1746 memcg = page->mem_cgroup;
1747 if (unlikely(!memcg))
1748 return NULL;
1749
1750 if (atomic_read(&memcg->moving_account) <= 0)
1751 return memcg;
1752
1753 spin_lock_irqsave(&memcg->move_lock, flags);
1754 if (memcg != page->mem_cgroup) {
1755 spin_unlock_irqrestore(&memcg->move_lock, flags);
1756 goto again;
1757 }
1758
1759 /*
1760 * When charge migration first begins, we can have locked and
1761 * unlocked page stat updates happening concurrently. Track
1762 * the task who has the lock for mem_cgroup_end_page_stat().
1763 */
1764 memcg->move_lock_task = current;
1765 memcg->move_lock_flags = flags;
1766
1767 return memcg;
1768 }
1769 EXPORT_SYMBOL(mem_cgroup_begin_page_stat);
1770
1771 /**
1772 * mem_cgroup_end_page_stat - finish a page state statistics transaction
1773 * @memcg: the memcg that was accounted against
1774 */
1775 void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
1776 {
1777 if (memcg && memcg->move_lock_task == current) {
1778 unsigned long flags = memcg->move_lock_flags;
1779
1780 memcg->move_lock_task = NULL;
1781 memcg->move_lock_flags = 0;
1782
1783 spin_unlock_irqrestore(&memcg->move_lock, flags);
1784 }
1785
1786 rcu_read_unlock();
1787 }
1788 EXPORT_SYMBOL(mem_cgroup_end_page_stat);
1789
1790 /*
1791 * size of first charge trial. "32" comes from vmscan.c's magic value.
1792 * TODO: maybe necessary to use big numbers in big irons.
1793 */
1794 #define CHARGE_BATCH 32U
1795 struct memcg_stock_pcp {
1796 struct mem_cgroup *cached; /* this never be root cgroup */
1797 unsigned int nr_pages;
1798 struct work_struct work;
1799 unsigned long flags;
1800 #define FLUSHING_CACHED_CHARGE 0
1801 };
1802 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1803 static DEFINE_MUTEX(percpu_charge_mutex);
1804
1805 /**
1806 * consume_stock: Try to consume stocked charge on this cpu.
1807 * @memcg: memcg to consume from.
1808 * @nr_pages: how many pages to charge.
1809 *
1810 * The charges will only happen if @memcg matches the current cpu's memcg
1811 * stock, and at least @nr_pages are available in that stock. Failure to
1812 * service an allocation will refill the stock.
1813 *
1814 * returns true if successful, false otherwise.
1815 */
1816 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1817 {
1818 struct memcg_stock_pcp *stock;
1819 bool ret = false;
1820
1821 if (nr_pages > CHARGE_BATCH)
1822 return ret;
1823
1824 stock = &get_cpu_var(memcg_stock);
1825 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1826 stock->nr_pages -= nr_pages;
1827 ret = true;
1828 }
1829 put_cpu_var(memcg_stock);
1830 return ret;
1831 }
1832
1833 /*
1834 * Returns stocks cached in percpu and reset cached information.
1835 */
1836 static void drain_stock(struct memcg_stock_pcp *stock)
1837 {
1838 struct mem_cgroup *old = stock->cached;
1839
1840 if (stock->nr_pages) {
1841 page_counter_uncharge(&old->memory, stock->nr_pages);
1842 if (do_memsw_account())
1843 page_counter_uncharge(&old->memsw, stock->nr_pages);
1844 css_put_many(&old->css, stock->nr_pages);
1845 stock->nr_pages = 0;
1846 }
1847 stock->cached = NULL;
1848 }
1849
1850 /*
1851 * This must be called under preempt disabled or must be called by
1852 * a thread which is pinned to local cpu.
1853 */
1854 static void drain_local_stock(struct work_struct *dummy)
1855 {
1856 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
1857 drain_stock(stock);
1858 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1859 }
1860
1861 /*
1862 * Cache charges(val) to local per_cpu area.
1863 * This will be consumed by consume_stock() function, later.
1864 */
1865 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1866 {
1867 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1868
1869 if (stock->cached != memcg) { /* reset if necessary */
1870 drain_stock(stock);
1871 stock->cached = memcg;
1872 }
1873 stock->nr_pages += nr_pages;
1874 put_cpu_var(memcg_stock);
1875 }
1876
1877 /*
1878 * Drains all per-CPU charge caches for given root_memcg resp. subtree
1879 * of the hierarchy under it.
1880 */
1881 static void drain_all_stock(struct mem_cgroup *root_memcg)
1882 {
1883 int cpu, curcpu;
1884
1885 /* If someone's already draining, avoid adding running more workers. */
1886 if (!mutex_trylock(&percpu_charge_mutex))
1887 return;
1888 /* Notify other cpus that system-wide "drain" is running */
1889 get_online_cpus();
1890 curcpu = get_cpu();
1891 for_each_online_cpu(cpu) {
1892 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1893 struct mem_cgroup *memcg;
1894
1895 memcg = stock->cached;
1896 if (!memcg || !stock->nr_pages)
1897 continue;
1898 if (!mem_cgroup_is_descendant(memcg, root_memcg))
1899 continue;
1900 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1901 if (cpu == curcpu)
1902 drain_local_stock(&stock->work);
1903 else
1904 schedule_work_on(cpu, &stock->work);
1905 }
1906 }
1907 put_cpu();
1908 put_online_cpus();
1909 mutex_unlock(&percpu_charge_mutex);
1910 }
1911
1912 static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
1913 unsigned long action,
1914 void *hcpu)
1915 {
1916 int cpu = (unsigned long)hcpu;
1917 struct memcg_stock_pcp *stock;
1918
1919 if (action == CPU_ONLINE)
1920 return NOTIFY_OK;
1921
1922 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1923 return NOTIFY_OK;
1924
1925 stock = &per_cpu(memcg_stock, cpu);
1926 drain_stock(stock);
1927 return NOTIFY_OK;
1928 }
1929
1930 static void reclaim_high(struct mem_cgroup *memcg,
1931 unsigned int nr_pages,
1932 gfp_t gfp_mask)
1933 {
1934 do {
1935 if (page_counter_read(&memcg->memory) <= memcg->high)
1936 continue;
1937 mem_cgroup_events(memcg, MEMCG_HIGH, 1);
1938 try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
1939 } while ((memcg = parent_mem_cgroup(memcg)));
1940 }
1941
1942 static void high_work_func(struct work_struct *work)
1943 {
1944 struct mem_cgroup *memcg;
1945
1946 memcg = container_of(work, struct mem_cgroup, high_work);
1947 reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
1948 }
1949
1950 /*
1951 * Scheduled by try_charge() to be executed from the userland return path
1952 * and reclaims memory over the high limit.
1953 */
1954 void mem_cgroup_handle_over_high(void)
1955 {
1956 unsigned int nr_pages = current->memcg_nr_pages_over_high;
1957 struct mem_cgroup *memcg;
1958
1959 if (likely(!nr_pages))
1960 return;
1961
1962 memcg = get_mem_cgroup_from_mm(current->mm);
1963 reclaim_high(memcg, nr_pages, GFP_KERNEL);
1964 css_put(&memcg->css);
1965 current->memcg_nr_pages_over_high = 0;
1966 }
1967
1968 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
1969 unsigned int nr_pages)
1970 {
1971 unsigned int batch = max(CHARGE_BATCH, nr_pages);
1972 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1973 struct mem_cgroup *mem_over_limit;
1974 struct page_counter *counter;
1975 unsigned long nr_reclaimed;
1976 bool may_swap = true;
1977 bool drained = false;
1978
1979 if (mem_cgroup_is_root(memcg))
1980 return 0;
1981 retry:
1982 if (consume_stock(memcg, nr_pages))
1983 return 0;
1984
1985 if (!do_memsw_account() ||
1986 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
1987 if (page_counter_try_charge(&memcg->memory, batch, &counter))
1988 goto done_restock;
1989 if (do_memsw_account())
1990 page_counter_uncharge(&memcg->memsw, batch);
1991 mem_over_limit = mem_cgroup_from_counter(counter, memory);
1992 } else {
1993 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
1994 may_swap = false;
1995 }
1996
1997 if (batch > nr_pages) {
1998 batch = nr_pages;
1999 goto retry;
2000 }
2001
2002 /*
2003 * Unlike in global OOM situations, memcg is not in a physical
2004 * memory shortage. Allow dying and OOM-killed tasks to
2005 * bypass the last charges so that they can exit quickly and
2006 * free their memory.
2007 */
2008 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2009 fatal_signal_pending(current) ||
2010 current->flags & PF_EXITING))
2011 goto force;
2012
2013 if (unlikely(task_in_memcg_oom(current)))
2014 goto nomem;
2015
2016 if (!gfpflags_allow_blocking(gfp_mask))
2017 goto nomem;
2018
2019 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
2020
2021 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2022 gfp_mask, may_swap);
2023
2024 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2025 goto retry;
2026
2027 if (!drained) {
2028 drain_all_stock(mem_over_limit);
2029 drained = true;
2030 goto retry;
2031 }
2032
2033 if (gfp_mask & __GFP_NORETRY)
2034 goto nomem;
2035 /*
2036 * Even though the limit is exceeded at this point, reclaim
2037 * may have been able to free some pages. Retry the charge
2038 * before killing the task.
2039 *
2040 * Only for regular pages, though: huge pages are rather
2041 * unlikely to succeed so close to the limit, and we fall back
2042 * to regular pages anyway in case of failure.
2043 */
2044 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2045 goto retry;
2046 /*
2047 * At task move, charge accounts can be doubly counted. So, it's
2048 * better to wait until the end of task_move if something is going on.
2049 */
2050 if (mem_cgroup_wait_acct_move(mem_over_limit))
2051 goto retry;
2052
2053 if (nr_retries--)
2054 goto retry;
2055
2056 if (gfp_mask & __GFP_NOFAIL)
2057 goto force;
2058
2059 if (fatal_signal_pending(current))
2060 goto force;
2061
2062 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
2063
2064 mem_cgroup_oom(mem_over_limit, gfp_mask,
2065 get_order(nr_pages * PAGE_SIZE));
2066 nomem:
2067 if (!(gfp_mask & __GFP_NOFAIL))
2068 return -ENOMEM;
2069 force:
2070 /*
2071 * The allocation either can't fail or will lead to more memory
2072 * being freed very soon. Allow memory usage go over the limit
2073 * temporarily by force charging it.
2074 */
2075 page_counter_charge(&memcg->memory, nr_pages);
2076 if (do_memsw_account())
2077 page_counter_charge(&memcg->memsw, nr_pages);
2078 css_get_many(&memcg->css, nr_pages);
2079
2080 return 0;
2081
2082 done_restock:
2083 css_get_many(&memcg->css, batch);
2084 if (batch > nr_pages)
2085 refill_stock(memcg, batch - nr_pages);
2086
2087 /*
2088 * If the hierarchy is above the normal consumption range, schedule
2089 * reclaim on returning to userland. We can perform reclaim here
2090 * if __GFP_RECLAIM but let's always punt for simplicity and so that
2091 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2092 * not recorded as it most likely matches current's and won't
2093 * change in the meantime. As high limit is checked again before
2094 * reclaim, the cost of mismatch is negligible.
2095 */
2096 do {
2097 if (page_counter_read(&memcg->memory) > memcg->high) {
2098 /* Don't bother a random interrupted task */
2099 if (in_interrupt()) {
2100 schedule_work(&memcg->high_work);
2101 break;
2102 }
2103 current->memcg_nr_pages_over_high += batch;
2104 set_notify_resume(current);
2105 break;
2106 }
2107 } while ((memcg = parent_mem_cgroup(memcg)));
2108
2109 return 0;
2110 }
2111
2112 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2113 {
2114 if (mem_cgroup_is_root(memcg))
2115 return;
2116
2117 page_counter_uncharge(&memcg->memory, nr_pages);
2118 if (do_memsw_account())
2119 page_counter_uncharge(&memcg->memsw, nr_pages);
2120
2121 css_put_many(&memcg->css, nr_pages);
2122 }
2123
2124 static void lock_page_lru(struct page *page, int *isolated)
2125 {
2126 struct zone *zone = page_zone(page);
2127
2128 spin_lock_irq(&zone->lru_lock);
2129 if (PageLRU(page)) {
2130 struct lruvec *lruvec;
2131
2132 lruvec = mem_cgroup_page_lruvec(page, zone);
2133 ClearPageLRU(page);
2134 del_page_from_lru_list(page, lruvec, page_lru(page));
2135 *isolated = 1;
2136 } else
2137 *isolated = 0;
2138 }
2139
2140 static void unlock_page_lru(struct page *page, int isolated)
2141 {
2142 struct zone *zone = page_zone(page);
2143
2144 if (isolated) {
2145 struct lruvec *lruvec;
2146
2147 lruvec = mem_cgroup_page_lruvec(page, zone);
2148 VM_BUG_ON_PAGE(PageLRU(page), page);
2149 SetPageLRU(page);
2150 add_page_to_lru_list(page, lruvec, page_lru(page));
2151 }
2152 spin_unlock_irq(&zone->lru_lock);
2153 }
2154
2155 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2156 bool lrucare)
2157 {
2158 int isolated;
2159
2160 VM_BUG_ON_PAGE(page->mem_cgroup, page);
2161
2162 /*
2163 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
2164 * may already be on some other mem_cgroup's LRU. Take care of it.
2165 */
2166 if (lrucare)
2167 lock_page_lru(page, &isolated);
2168
2169 /*
2170 * Nobody should be changing or seriously looking at
2171 * page->mem_cgroup at this point:
2172 *
2173 * - the page is uncharged
2174 *
2175 * - the page is off-LRU
2176 *
2177 * - an anonymous fault has exclusive page access, except for
2178 * a locked page table
2179 *
2180 * - a page cache insertion, a swapin fault, or a migration
2181 * have the page locked
2182 */
2183 page->mem_cgroup = memcg;
2184
2185 if (lrucare)
2186 unlock_page_lru(page, isolated);
2187 }
2188
2189 #ifndef CONFIG_SLOB
2190 static int memcg_alloc_cache_id(void)
2191 {
2192 int id, size;
2193 int err;
2194
2195 id = ida_simple_get(&memcg_cache_ida,
2196 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2197 if (id < 0)
2198 return id;
2199
2200 if (id < memcg_nr_cache_ids)
2201 return id;
2202
2203 /*
2204 * There's no space for the new id in memcg_caches arrays,
2205 * so we have to grow them.
2206 */
2207 down_write(&memcg_cache_ids_sem);
2208
2209 size = 2 * (id + 1);
2210 if (size < MEMCG_CACHES_MIN_SIZE)
2211 size = MEMCG_CACHES_MIN_SIZE;
2212 else if (size > MEMCG_CACHES_MAX_SIZE)
2213 size = MEMCG_CACHES_MAX_SIZE;
2214
2215 err = memcg_update_all_caches(size);
2216 if (!err)
2217 err = memcg_update_all_list_lrus(size);
2218 if (!err)
2219 memcg_nr_cache_ids = size;
2220
2221 up_write(&memcg_cache_ids_sem);
2222
2223 if (err) {
2224 ida_simple_remove(&memcg_cache_ida, id);
2225 return err;
2226 }
2227 return id;
2228 }
2229
2230 static void memcg_free_cache_id(int id)
2231 {
2232 ida_simple_remove(&memcg_cache_ida, id);
2233 }
2234
2235 struct memcg_kmem_cache_create_work {
2236 struct mem_cgroup *memcg;
2237 struct kmem_cache *cachep;
2238 struct work_struct work;
2239 };
2240
2241 static void memcg_kmem_cache_create_func(struct work_struct *w)
2242 {
2243 struct memcg_kmem_cache_create_work *cw =
2244 container_of(w, struct memcg_kmem_cache_create_work, work);
2245 struct mem_cgroup *memcg = cw->memcg;
2246 struct kmem_cache *cachep = cw->cachep;
2247
2248 memcg_create_kmem_cache(memcg, cachep);
2249
2250 css_put(&memcg->css);
2251 kfree(cw);
2252 }
2253
2254 /*
2255 * Enqueue the creation of a per-memcg kmem_cache.
2256 */
2257 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2258 struct kmem_cache *cachep)
2259 {
2260 struct memcg_kmem_cache_create_work *cw;
2261
2262 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
2263 if (!cw)
2264 return;
2265
2266 css_get(&memcg->css);
2267
2268 cw->memcg = memcg;
2269 cw->cachep = cachep;
2270 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2271
2272 schedule_work(&cw->work);
2273 }
2274
2275 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2276 struct kmem_cache *cachep)
2277 {
2278 /*
2279 * We need to stop accounting when we kmalloc, because if the
2280 * corresponding kmalloc cache is not yet created, the first allocation
2281 * in __memcg_schedule_kmem_cache_create will recurse.
2282 *
2283 * However, it is better to enclose the whole function. Depending on
2284 * the debugging options enabled, INIT_WORK(), for instance, can
2285 * trigger an allocation. This too, will make us recurse. Because at
2286 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2287 * the safest choice is to do it like this, wrapping the whole function.
2288 */
2289 current->memcg_kmem_skip_account = 1;
2290 __memcg_schedule_kmem_cache_create(memcg, cachep);
2291 current->memcg_kmem_skip_account = 0;
2292 }
2293
2294 /*
2295 * Return the kmem_cache we're supposed to use for a slab allocation.
2296 * We try to use the current memcg's version of the cache.
2297 *
2298 * If the cache does not exist yet, if we are the first user of it,
2299 * we either create it immediately, if possible, or create it asynchronously
2300 * in a workqueue.
2301 * In the latter case, we will let the current allocation go through with
2302 * the original cache.
2303 *
2304 * Can't be called in interrupt context or from kernel threads.
2305 * This function needs to be called with rcu_read_lock() held.
2306 */
2307 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
2308 {
2309 struct mem_cgroup *memcg;
2310 struct kmem_cache *memcg_cachep;
2311 int kmemcg_id;
2312
2313 VM_BUG_ON(!is_root_cache(cachep));
2314
2315 if (cachep->flags & SLAB_ACCOUNT)
2316 gfp |= __GFP_ACCOUNT;
2317
2318 if (!(gfp & __GFP_ACCOUNT))
2319 return cachep;
2320
2321 if (current->memcg_kmem_skip_account)
2322 return cachep;
2323
2324 memcg = get_mem_cgroup_from_mm(current->mm);
2325 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2326 if (kmemcg_id < 0)
2327 goto out;
2328
2329 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2330 if (likely(memcg_cachep))
2331 return memcg_cachep;
2332
2333 /*
2334 * If we are in a safe context (can wait, and not in interrupt
2335 * context), we could be be predictable and return right away.
2336 * This would guarantee that the allocation being performed
2337 * already belongs in the new cache.
2338 *
2339 * However, there are some clashes that can arrive from locking.
2340 * For instance, because we acquire the slab_mutex while doing
2341 * memcg_create_kmem_cache, this means no further allocation
2342 * could happen with the slab_mutex held. So it's better to
2343 * defer everything.
2344 */
2345 memcg_schedule_kmem_cache_create(memcg, cachep);
2346 out:
2347 css_put(&memcg->css);
2348 return cachep;
2349 }
2350
2351 void __memcg_kmem_put_cache(struct kmem_cache *cachep)
2352 {
2353 if (!is_root_cache(cachep))
2354 css_put(&cachep->memcg_params.memcg->css);
2355 }
2356
2357 int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
2358 struct mem_cgroup *memcg)
2359 {
2360 unsigned int nr_pages = 1 << order;
2361 struct page_counter *counter;
2362 int ret;
2363
2364 if (!memcg_kmem_online(memcg))
2365 return 0;
2366
2367 ret = try_charge(memcg, gfp, nr_pages);
2368 if (ret)
2369 return ret;
2370
2371 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
2372 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
2373 cancel_charge(memcg, nr_pages);
2374 return -ENOMEM;
2375 }
2376
2377 page->mem_cgroup = memcg;
2378
2379 return 0;
2380 }
2381
2382 int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
2383 {
2384 struct mem_cgroup *memcg;
2385 int ret;
2386
2387 memcg = get_mem_cgroup_from_mm(current->mm);
2388 ret = __memcg_kmem_charge_memcg(page, gfp, order, memcg);
2389 css_put(&memcg->css);
2390 return ret;
2391 }
2392
2393 void __memcg_kmem_uncharge(struct page *page, int order)
2394 {
2395 struct mem_cgroup *memcg = page->mem_cgroup;
2396 unsigned int nr_pages = 1 << order;
2397
2398 if (!memcg)
2399 return;
2400
2401 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2402
2403 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2404 page_counter_uncharge(&memcg->kmem, nr_pages);
2405
2406 page_counter_uncharge(&memcg->memory, nr_pages);
2407 if (do_memsw_account())
2408 page_counter_uncharge(&memcg->memsw, nr_pages);
2409
2410 page->mem_cgroup = NULL;
2411 css_put_many(&memcg->css, nr_pages);
2412 }
2413 #endif /* !CONFIG_SLOB */
2414
2415 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2416
2417 /*
2418 * Because tail pages are not marked as "used", set it. We're under
2419 * zone->lru_lock and migration entries setup in all page mappings.
2420 */
2421 void mem_cgroup_split_huge_fixup(struct page *head)
2422 {
2423 int i;
2424
2425 if (mem_cgroup_disabled())
2426 return;
2427
2428 for (i = 1; i < HPAGE_PMD_NR; i++)
2429 head[i].mem_cgroup = head->mem_cgroup;
2430
2431 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
2432 HPAGE_PMD_NR);
2433 }
2434 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2435
2436 #ifdef CONFIG_MEMCG_SWAP
2437 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2438 bool charge)
2439 {
2440 int val = (charge) ? 1 : -1;
2441 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
2442 }
2443
2444 /**
2445 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2446 * @entry: swap entry to be moved
2447 * @from: mem_cgroup which the entry is moved from
2448 * @to: mem_cgroup which the entry is moved to
2449 *
2450 * It succeeds only when the swap_cgroup's record for this entry is the same
2451 * as the mem_cgroup's id of @from.
2452 *
2453 * Returns 0 on success, -EINVAL on failure.
2454 *
2455 * The caller must have charged to @to, IOW, called page_counter_charge() about
2456 * both res and memsw, and called css_get().
2457 */
2458 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2459 struct mem_cgroup *from, struct mem_cgroup *to)
2460 {
2461 unsigned short old_id, new_id;
2462
2463 old_id = mem_cgroup_id(from);
2464 new_id = mem_cgroup_id(to);
2465
2466 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2467 mem_cgroup_swap_statistics(from, false);
2468 mem_cgroup_swap_statistics(to, true);
2469 return 0;
2470 }
2471 return -EINVAL;
2472 }
2473 #else
2474 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2475 struct mem_cgroup *from, struct mem_cgroup *to)
2476 {
2477 return -EINVAL;
2478 }
2479 #endif
2480
2481 static DEFINE_MUTEX(memcg_limit_mutex);
2482
2483 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2484 unsigned long limit)
2485 {
2486 unsigned long curusage;
2487 unsigned long oldusage;
2488 bool enlarge = false;
2489 int retry_count;
2490 int ret;
2491
2492 /*
2493 * For keeping hierarchical_reclaim simple, how long we should retry
2494 * is depends on callers. We set our retry-count to be function
2495 * of # of children which we should visit in this loop.
2496 */
2497 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2498 mem_cgroup_count_children(memcg);
2499
2500 oldusage = page_counter_read(&memcg->memory);
2501
2502 do {
2503 if (signal_pending(current)) {
2504 ret = -EINTR;
2505 break;
2506 }
2507
2508 mutex_lock(&memcg_limit_mutex);
2509 if (limit > memcg->memsw.limit) {
2510 mutex_unlock(&memcg_limit_mutex);
2511 ret = -EINVAL;
2512 break;
2513 }
2514 if (limit > memcg->memory.limit)
2515 enlarge = true;
2516 ret = page_counter_limit(&memcg->memory, limit);
2517 mutex_unlock(&memcg_limit_mutex);
2518
2519 if (!ret)
2520 break;
2521
2522 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2523
2524 curusage = page_counter_read(&memcg->memory);
2525 /* Usage is reduced ? */
2526 if (curusage >= oldusage)
2527 retry_count--;
2528 else
2529 oldusage = curusage;
2530 } while (retry_count);
2531
2532 if (!ret && enlarge)
2533 memcg_oom_recover(memcg);
2534
2535 return ret;
2536 }
2537
2538 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2539 unsigned long limit)
2540 {
2541 unsigned long curusage;
2542 unsigned long oldusage;
2543 bool enlarge = false;
2544 int retry_count;
2545 int ret;
2546
2547 /* see mem_cgroup_resize_res_limit */
2548 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2549 mem_cgroup_count_children(memcg);
2550
2551 oldusage = page_counter_read(&memcg->memsw);
2552
2553 do {
2554 if (signal_pending(current)) {
2555 ret = -EINTR;
2556 break;
2557 }
2558
2559 mutex_lock(&memcg_limit_mutex);
2560 if (limit < memcg->memory.limit) {
2561 mutex_unlock(&memcg_limit_mutex);
2562 ret = -EINVAL;
2563 break;
2564 }
2565 if (limit > memcg->memsw.limit)
2566 enlarge = true;
2567 ret = page_counter_limit(&memcg->memsw, limit);
2568 mutex_unlock(&memcg_limit_mutex);
2569
2570 if (!ret)
2571 break;
2572
2573 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2574
2575 curusage = page_counter_read(&memcg->memsw);
2576 /* Usage is reduced ? */
2577 if (curusage >= oldusage)
2578 retry_count--;
2579 else
2580 oldusage = curusage;
2581 } while (retry_count);
2582
2583 if (!ret && enlarge)
2584 memcg_oom_recover(memcg);
2585
2586 return ret;
2587 }
2588
2589 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2590 gfp_t gfp_mask,
2591 unsigned long *total_scanned)
2592 {
2593 unsigned long nr_reclaimed = 0;
2594 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2595 unsigned long reclaimed;
2596 int loop = 0;
2597 struct mem_cgroup_tree_per_zone *mctz;
2598 unsigned long excess;
2599 unsigned long nr_scanned;
2600
2601 if (order > 0)
2602 return 0;
2603
2604 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2605 /*
2606 * This loop can run a while, specially if mem_cgroup's continuously
2607 * keep exceeding their soft limit and putting the system under
2608 * pressure
2609 */
2610 do {
2611 if (next_mz)
2612 mz = next_mz;
2613 else
2614 mz = mem_cgroup_largest_soft_limit_node(mctz);
2615 if (!mz)
2616 break;
2617
2618 nr_scanned = 0;
2619 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2620 gfp_mask, &nr_scanned);
2621 nr_reclaimed += reclaimed;
2622 *total_scanned += nr_scanned;
2623 spin_lock_irq(&mctz->lock);
2624 __mem_cgroup_remove_exceeded(mz, mctz);
2625
2626 /*
2627 * If we failed to reclaim anything from this memory cgroup
2628 * it is time to move on to the next cgroup
2629 */
2630 next_mz = NULL;
2631 if (!reclaimed)
2632 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2633
2634 excess = soft_limit_excess(mz->memcg);
2635 /*
2636 * One school of thought says that we should not add
2637 * back the node to the tree if reclaim returns 0.
2638 * But our reclaim could return 0, simply because due
2639 * to priority we are exposing a smaller subset of
2640 * memory to reclaim from. Consider this as a longer
2641 * term TODO.
2642 */
2643 /* If excess == 0, no tree ops */
2644 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2645 spin_unlock_irq(&mctz->lock);
2646 css_put(&mz->memcg->css);
2647 loop++;
2648 /*
2649 * Could not reclaim anything and there are no more
2650 * mem cgroups to try or we seem to be looping without
2651 * reclaiming anything.
2652 */
2653 if (!nr_reclaimed &&
2654 (next_mz == NULL ||
2655 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2656 break;
2657 } while (!nr_reclaimed);
2658 if (next_mz)
2659 css_put(&next_mz->memcg->css);
2660 return nr_reclaimed;
2661 }
2662
2663 /*
2664 * Test whether @memcg has children, dead or alive. Note that this
2665 * function doesn't care whether @memcg has use_hierarchy enabled and
2666 * returns %true if there are child csses according to the cgroup
2667 * hierarchy. Testing use_hierarchy is the caller's responsiblity.
2668 */
2669 static inline bool memcg_has_children(struct mem_cgroup *memcg)
2670 {
2671 bool ret;
2672
2673 rcu_read_lock();
2674 ret = css_next_child(NULL, &memcg->css);
2675 rcu_read_unlock();
2676 return ret;
2677 }
2678
2679 /*
2680 * Reclaims as many pages from the given memcg as possible and moves
2681 * the rest to the parent.
2682 *
2683 * Caller is responsible for holding css reference for memcg.
2684 */
2685 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2686 {
2687 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2688
2689 /* we call try-to-free pages for make this cgroup empty */
2690 lru_add_drain_all();
2691 /* try to free all pages in this cgroup */
2692 while (nr_retries && page_counter_read(&memcg->memory)) {
2693 int progress;
2694
2695 if (signal_pending(current))
2696 return -EINTR;
2697
2698 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2699 GFP_KERNEL, true);
2700 if (!progress) {
2701 nr_retries--;
2702 /* maybe some writeback is necessary */
2703 congestion_wait(BLK_RW_ASYNC, HZ/10);
2704 }
2705
2706 }
2707
2708 return 0;
2709 }
2710
2711 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2712 char *buf, size_t nbytes,
2713 loff_t off)
2714 {
2715 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2716
2717 if (mem_cgroup_is_root(memcg))
2718 return -EINVAL;
2719 return mem_cgroup_force_empty(memcg) ?: nbytes;
2720 }
2721
2722 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2723 struct cftype *cft)
2724 {
2725 return mem_cgroup_from_css(css)->use_hierarchy;
2726 }
2727
2728 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2729 struct cftype *cft, u64 val)
2730 {
2731 int retval = 0;
2732 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2733 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2734
2735 if (memcg->use_hierarchy == val)
2736 return 0;
2737
2738 /*
2739 * If parent's use_hierarchy is set, we can't make any modifications
2740 * in the child subtrees. If it is unset, then the change can
2741 * occur, provided the current cgroup has no children.
2742 *
2743 * For the root cgroup, parent_mem is NULL, we allow value to be
2744 * set if there are no children.
2745 */
2746 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2747 (val == 1 || val == 0)) {
2748 if (!memcg_has_children(memcg))
2749 memcg->use_hierarchy = val;
2750 else
2751 retval = -EBUSY;
2752 } else
2753 retval = -EINVAL;
2754
2755 return retval;
2756 }
2757
2758 static unsigned long tree_stat(struct mem_cgroup *memcg,
2759 enum mem_cgroup_stat_index idx)
2760 {
2761 struct mem_cgroup *iter;
2762 unsigned long val = 0;
2763
2764 for_each_mem_cgroup_tree(iter, memcg)
2765 val += mem_cgroup_read_stat(iter, idx);
2766
2767 return val;
2768 }
2769
2770 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2771 {
2772 unsigned long val;
2773
2774 if (mem_cgroup_is_root(memcg)) {
2775 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
2776 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS);
2777 if (swap)
2778 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
2779 } else {
2780 if (!swap)
2781 val = page_counter_read(&memcg->memory);
2782 else
2783 val = page_counter_read(&memcg->memsw);
2784 }
2785 return val;
2786 }
2787
2788 enum {
2789 RES_USAGE,
2790 RES_LIMIT,
2791 RES_MAX_USAGE,
2792 RES_FAILCNT,
2793 RES_SOFT_LIMIT,
2794 };
2795
2796 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2797 struct cftype *cft)
2798 {
2799 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2800 struct page_counter *counter;
2801
2802 switch (MEMFILE_TYPE(cft->private)) {
2803 case _MEM:
2804 counter = &memcg->memory;
2805 break;
2806 case _MEMSWAP:
2807 counter = &memcg->memsw;
2808 break;
2809 case _KMEM:
2810 counter = &memcg->kmem;
2811 break;
2812 case _TCP:
2813 counter = &memcg->tcpmem;
2814 break;
2815 default:
2816 BUG();
2817 }
2818
2819 switch (MEMFILE_ATTR(cft->private)) {
2820 case RES_USAGE:
2821 if (counter == &memcg->memory)
2822 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
2823 if (counter == &memcg->memsw)
2824 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
2825 return (u64)page_counter_read(counter) * PAGE_SIZE;
2826 case RES_LIMIT:
2827 return (u64)counter->limit * PAGE_SIZE;
2828 case RES_MAX_USAGE:
2829 return (u64)counter->watermark * PAGE_SIZE;
2830 case RES_FAILCNT:
2831 return counter->failcnt;
2832 case RES_SOFT_LIMIT:
2833 return (u64)memcg->soft_limit * PAGE_SIZE;
2834 default:
2835 BUG();
2836 }
2837 }
2838
2839 #ifndef CONFIG_SLOB
2840 static int memcg_online_kmem(struct mem_cgroup *memcg)
2841 {
2842 int memcg_id;
2843
2844 BUG_ON(memcg->kmemcg_id >= 0);
2845 BUG_ON(memcg->kmem_state);
2846
2847 memcg_id = memcg_alloc_cache_id();
2848 if (memcg_id < 0)
2849 return memcg_id;
2850
2851 static_branch_inc(&memcg_kmem_enabled_key);
2852 /*
2853 * A memory cgroup is considered kmem-online as soon as it gets
2854 * kmemcg_id. Setting the id after enabling static branching will
2855 * guarantee no one starts accounting before all call sites are
2856 * patched.
2857 */
2858 memcg->kmemcg_id = memcg_id;
2859 memcg->kmem_state = KMEM_ONLINE;
2860
2861 return 0;
2862 }
2863
2864 static int memcg_propagate_kmem(struct mem_cgroup *parent,
2865 struct mem_cgroup *memcg)
2866 {
2867 int ret = 0;
2868
2869 mutex_lock(&memcg_limit_mutex);
2870 /*
2871 * If the parent cgroup is not kmem-online now, it cannot be
2872 * onlined after this point, because it has at least one child
2873 * already.
2874 */
2875 if (memcg_kmem_online(parent) ||
2876 (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nokmem))
2877 ret = memcg_online_kmem(memcg);
2878 mutex_unlock(&memcg_limit_mutex);
2879 return ret;
2880 }
2881
2882 static void memcg_offline_kmem(struct mem_cgroup *memcg)
2883 {
2884 struct cgroup_subsys_state *css;
2885 struct mem_cgroup *parent, *child;
2886 int kmemcg_id;
2887
2888 if (memcg->kmem_state != KMEM_ONLINE)
2889 return;
2890 /*
2891 * Clear the online state before clearing memcg_caches array
2892 * entries. The slab_mutex in memcg_deactivate_kmem_caches()
2893 * guarantees that no cache will be created for this cgroup
2894 * after we are done (see memcg_create_kmem_cache()).
2895 */
2896 memcg->kmem_state = KMEM_ALLOCATED;
2897
2898 memcg_deactivate_kmem_caches(memcg);
2899
2900 kmemcg_id = memcg->kmemcg_id;
2901 BUG_ON(kmemcg_id < 0);
2902
2903 parent = parent_mem_cgroup(memcg);
2904 if (!parent)
2905 parent = root_mem_cgroup;
2906
2907 /*
2908 * Change kmemcg_id of this cgroup and all its descendants to the
2909 * parent's id, and then move all entries from this cgroup's list_lrus
2910 * to ones of the parent. After we have finished, all list_lrus
2911 * corresponding to this cgroup are guaranteed to remain empty. The
2912 * ordering is imposed by list_lru_node->lock taken by
2913 * memcg_drain_all_list_lrus().
2914 */
2915 css_for_each_descendant_pre(css, &memcg->css) {
2916 child = mem_cgroup_from_css(css);
2917 BUG_ON(child->kmemcg_id != kmemcg_id);
2918 child->kmemcg_id = parent->kmemcg_id;
2919 if (!memcg->use_hierarchy)
2920 break;
2921 }
2922 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2923
2924 memcg_free_cache_id(kmemcg_id);
2925 }
2926
2927 static void memcg_free_kmem(struct mem_cgroup *memcg)
2928 {
2929 /* css_alloc() failed, offlining didn't happen */
2930 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2931 memcg_offline_kmem(memcg);
2932
2933 if (memcg->kmem_state == KMEM_ALLOCATED) {
2934 memcg_destroy_kmem_caches(memcg);
2935 static_branch_dec(&memcg_kmem_enabled_key);
2936 WARN_ON(page_counter_read(&memcg->kmem));
2937 }
2938 }
2939 #else
2940 static int memcg_propagate_kmem(struct mem_cgroup *parent, struct mem_cgroup *memcg)
2941 {
2942 return 0;
2943 }
2944 static int memcg_online_kmem(struct mem_cgroup *memcg)
2945 {
2946 return 0;
2947 }
2948 static void memcg_offline_kmem(struct mem_cgroup *memcg)
2949 {
2950 }
2951 static void memcg_free_kmem(struct mem_cgroup *memcg)
2952 {
2953 }
2954 #endif /* !CONFIG_SLOB */
2955
2956 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2957 unsigned long limit)
2958 {
2959 int ret = 0;
2960
2961 mutex_lock(&memcg_limit_mutex);
2962 /* Top-level cgroup doesn't propagate from root */
2963 if (!memcg_kmem_online(memcg)) {
2964 if (cgroup_is_populated(memcg->css.cgroup) ||
2965 (memcg->use_hierarchy && memcg_has_children(memcg)))
2966 ret = -EBUSY;
2967 if (ret)
2968 goto out;
2969 ret = memcg_online_kmem(memcg);
2970 if (ret)
2971 goto out;
2972 }
2973 ret = page_counter_limit(&memcg->kmem, limit);
2974 out:
2975 mutex_unlock(&memcg_limit_mutex);
2976 return ret;
2977 }
2978
2979 static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2980 {
2981 int ret;
2982
2983 mutex_lock(&memcg_limit_mutex);
2984
2985 ret = page_counter_limit(&memcg->tcpmem, limit);
2986 if (ret)
2987 goto out;
2988
2989 if (!memcg->tcpmem_active) {
2990 /*
2991 * The active flag needs to be written after the static_key
2992 * update. This is what guarantees that the socket activation
2993 * function is the last one to run. See sock_update_memcg() for
2994 * details, and note that we don't mark any socket as belonging
2995 * to this memcg until that flag is up.
2996 *
2997 * We need to do this, because static_keys will span multiple
2998 * sites, but we can't control their order. If we mark a socket
2999 * as accounted, but the accounting functions are not patched in
3000 * yet, we'll lose accounting.
3001 *
3002 * We never race with the readers in sock_update_memcg(),
3003 * because when this value change, the code to process it is not
3004 * patched in yet.
3005 */
3006 static_branch_inc(&memcg_sockets_enabled_key);
3007 memcg->tcpmem_active = true;
3008 }
3009 out:
3010 mutex_unlock(&memcg_limit_mutex);
3011 return ret;
3012 }
3013
3014 /*
3015 * The user of this function is...
3016 * RES_LIMIT.
3017 */
3018 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3019 char *buf, size_t nbytes, loff_t off)
3020 {
3021 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3022 unsigned long nr_pages;
3023 int ret;
3024
3025 buf = strstrip(buf);
3026 ret = page_counter_memparse(buf, "-1", &nr_pages);
3027 if (ret)
3028 return ret;
3029
3030 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3031 case RES_LIMIT:
3032 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3033 ret = -EINVAL;
3034 break;
3035 }
3036 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3037 case _MEM:
3038 ret = mem_cgroup_resize_limit(memcg, nr_pages);
3039 break;
3040 case _MEMSWAP:
3041 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
3042 break;
3043 case _KMEM:
3044 ret = memcg_update_kmem_limit(memcg, nr_pages);
3045 break;
3046 case _TCP:
3047 ret = memcg_update_tcp_limit(memcg, nr_pages);
3048 break;
3049 }
3050 break;
3051 case RES_SOFT_LIMIT:
3052 memcg->soft_limit = nr_pages;
3053 ret = 0;
3054 break;
3055 }
3056 return ret ?: nbytes;
3057 }
3058
3059 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3060 size_t nbytes, loff_t off)
3061 {
3062 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3063 struct page_counter *counter;
3064
3065 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3066 case _MEM:
3067 counter = &memcg->memory;
3068 break;
3069 case _MEMSWAP:
3070 counter = &memcg->memsw;
3071 break;
3072 case _KMEM:
3073 counter = &memcg->kmem;
3074 break;
3075 case _TCP:
3076 counter = &memcg->tcpmem;
3077 break;
3078 default:
3079 BUG();
3080 }
3081
3082 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3083 case RES_MAX_USAGE:
3084 page_counter_reset_watermark(counter);
3085 break;
3086 case RES_FAILCNT:
3087 counter->failcnt = 0;
3088 break;
3089 default:
3090 BUG();
3091 }
3092
3093 return nbytes;
3094 }
3095
3096 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3097 struct cftype *cft)
3098 {
3099 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3100 }
3101
3102 #ifdef CONFIG_MMU
3103 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3104 struct cftype *cft, u64 val)
3105 {
3106 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3107
3108 if (val & ~MOVE_MASK)
3109 return -EINVAL;
3110
3111 /*
3112 * No kind of locking is needed in here, because ->can_attach() will
3113 * check this value once in the beginning of the process, and then carry
3114 * on with stale data. This means that changes to this value will only
3115 * affect task migrations starting after the change.
3116 */
3117 memcg->move_charge_at_immigrate = val;
3118 return 0;
3119 }
3120 #else
3121 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3122 struct cftype *cft, u64 val)
3123 {
3124 return -ENOSYS;
3125 }
3126 #endif
3127
3128 #ifdef CONFIG_NUMA
3129 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3130 {
3131 struct numa_stat {
3132 const char *name;
3133 unsigned int lru_mask;
3134 };
3135
3136 static const struct numa_stat stats[] = {
3137 { "total", LRU_ALL },
3138 { "file", LRU_ALL_FILE },
3139 { "anon", LRU_ALL_ANON },
3140 { "unevictable", BIT(LRU_UNEVICTABLE) },
3141 };
3142 const struct numa_stat *stat;
3143 int nid;
3144 unsigned long nr;
3145 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3146
3147 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3148 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3149 seq_printf(m, "%s=%lu", stat->name, nr);
3150 for_each_node_state(nid, N_MEMORY) {
3151 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3152 stat->lru_mask);
3153 seq_printf(m, " N%d=%lu", nid, nr);
3154 }
3155 seq_putc(m, '\n');
3156 }
3157
3158 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3159 struct mem_cgroup *iter;
3160
3161 nr = 0;
3162 for_each_mem_cgroup_tree(iter, memcg)
3163 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3164 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3165 for_each_node_state(nid, N_MEMORY) {
3166 nr = 0;
3167 for_each_mem_cgroup_tree(iter, memcg)
3168 nr += mem_cgroup_node_nr_lru_pages(
3169 iter, nid, stat->lru_mask);
3170 seq_printf(m, " N%d=%lu", nid, nr);
3171 }
3172 seq_putc(m, '\n');
3173 }
3174
3175 return 0;
3176 }
3177 #endif /* CONFIG_NUMA */
3178
3179 static int memcg_stat_show(struct seq_file *m, void *v)
3180 {
3181 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3182 unsigned long memory, memsw;
3183 struct mem_cgroup *mi;
3184 unsigned int i;
3185
3186 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3187 MEM_CGROUP_STAT_NSTATS);
3188 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3189 MEM_CGROUP_EVENTS_NSTATS);
3190 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3191
3192 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3193 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3194 continue;
3195 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3196 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3197 }
3198
3199 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3200 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3201 mem_cgroup_read_events(memcg, i));
3202
3203 for (i = 0; i < NR_LRU_LISTS; i++)
3204 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3205 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3206
3207 /* Hierarchical information */
3208 memory = memsw = PAGE_COUNTER_MAX;
3209 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3210 memory = min(memory, mi->memory.limit);
3211 memsw = min(memsw, mi->memsw.limit);
3212 }
3213 seq_printf(m, "hierarchical_memory_limit %llu\n",
3214 (u64)memory * PAGE_SIZE);
3215 if (do_memsw_account())
3216 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3217 (u64)memsw * PAGE_SIZE);
3218
3219 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3220 unsigned long long val = 0;
3221
3222 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3223 continue;
3224 for_each_mem_cgroup_tree(mi, memcg)
3225 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3226 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3227 }
3228
3229 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3230 unsigned long long val = 0;
3231
3232 for_each_mem_cgroup_tree(mi, memcg)
3233 val += mem_cgroup_read_events(mi, i);
3234 seq_printf(m, "total_%s %llu\n",
3235 mem_cgroup_events_names[i], val);
3236 }
3237
3238 for (i = 0; i < NR_LRU_LISTS; i++) {
3239 unsigned long long val = 0;
3240
3241 for_each_mem_cgroup_tree(mi, memcg)
3242 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3243 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3244 }
3245
3246 #ifdef CONFIG_DEBUG_VM
3247 {
3248 int nid, zid;
3249 struct mem_cgroup_per_zone *mz;
3250 struct zone_reclaim_stat *rstat;
3251 unsigned long recent_rotated[2] = {0, 0};
3252 unsigned long recent_scanned[2] = {0, 0};
3253
3254 for_each_online_node(nid)
3255 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3256 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
3257 rstat = &mz->lruvec.reclaim_stat;
3258
3259 recent_rotated[0] += rstat->recent_rotated[0];
3260 recent_rotated[1] += rstat->recent_rotated[1];
3261 recent_scanned[0] += rstat->recent_scanned[0];
3262 recent_scanned[1] += rstat->recent_scanned[1];
3263 }
3264 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3265 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3266 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3267 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3268 }
3269 #endif
3270
3271 return 0;
3272 }
3273
3274 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3275 struct cftype *cft)
3276 {
3277 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3278
3279 return mem_cgroup_swappiness(memcg);
3280 }
3281
3282 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3283 struct cftype *cft, u64 val)
3284 {
3285 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3286
3287 if (val > 100)
3288 return -EINVAL;
3289
3290 if (css->parent)
3291 memcg->swappiness = val;
3292 else
3293 vm_swappiness = val;
3294
3295 return 0;
3296 }
3297
3298 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3299 {
3300 struct mem_cgroup_threshold_ary *t;
3301 unsigned long usage;
3302 int i;
3303
3304 rcu_read_lock();
3305 if (!swap)
3306 t = rcu_dereference(memcg->thresholds.primary);
3307 else
3308 t = rcu_dereference(memcg->memsw_thresholds.primary);
3309
3310 if (!t)
3311 goto unlock;
3312
3313 usage = mem_cgroup_usage(memcg, swap);
3314
3315 /*
3316 * current_threshold points to threshold just below or equal to usage.
3317 * If it's not true, a threshold was crossed after last
3318 * call of __mem_cgroup_threshold().
3319 */
3320 i = t->current_threshold;
3321
3322 /*
3323 * Iterate backward over array of thresholds starting from
3324 * current_threshold and check if a threshold is crossed.
3325 * If none of thresholds below usage is crossed, we read
3326 * only one element of the array here.
3327 */
3328 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3329 eventfd_signal(t->entries[i].eventfd, 1);
3330
3331 /* i = current_threshold + 1 */
3332 i++;
3333
3334 /*
3335 * Iterate forward over array of thresholds starting from
3336 * current_threshold+1 and check if a threshold is crossed.
3337 * If none of thresholds above usage is crossed, we read
3338 * only one element of the array here.
3339 */
3340 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3341 eventfd_signal(t->entries[i].eventfd, 1);
3342
3343 /* Update current_threshold */
3344 t->current_threshold = i - 1;
3345 unlock:
3346 rcu_read_unlock();
3347 }
3348
3349 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3350 {
3351 while (memcg) {
3352 __mem_cgroup_threshold(memcg, false);
3353 if (do_memsw_account())
3354 __mem_cgroup_threshold(memcg, true);
3355
3356 memcg = parent_mem_cgroup(memcg);
3357 }
3358 }
3359
3360 static int compare_thresholds(const void *a, const void *b)
3361 {
3362 const struct mem_cgroup_threshold *_a = a;
3363 const struct mem_cgroup_threshold *_b = b;
3364
3365 if (_a->threshold > _b->threshold)
3366 return 1;
3367
3368 if (_a->threshold < _b->threshold)
3369 return -1;
3370
3371 return 0;
3372 }
3373
3374 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3375 {
3376 struct mem_cgroup_eventfd_list *ev;
3377
3378 spin_lock(&memcg_oom_lock);
3379
3380 list_for_each_entry(ev, &memcg->oom_notify, list)
3381 eventfd_signal(ev->eventfd, 1);
3382
3383 spin_unlock(&memcg_oom_lock);
3384 return 0;
3385 }
3386
3387 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3388 {
3389 struct mem_cgroup *iter;
3390
3391 for_each_mem_cgroup_tree(iter, memcg)
3392 mem_cgroup_oom_notify_cb(iter);
3393 }
3394
3395 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3396 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3397 {
3398 struct mem_cgroup_thresholds *thresholds;
3399 struct mem_cgroup_threshold_ary *new;
3400 unsigned long threshold;
3401 unsigned long usage;
3402 int i, size, ret;
3403
3404 ret = page_counter_memparse(args, "-1", &threshold);
3405 if (ret)
3406 return ret;
3407
3408 mutex_lock(&memcg->thresholds_lock);
3409
3410 if (type == _MEM) {
3411 thresholds = &memcg->thresholds;
3412 usage = mem_cgroup_usage(memcg, false);
3413 } else if (type == _MEMSWAP) {
3414 thresholds = &memcg->memsw_thresholds;
3415 usage = mem_cgroup_usage(memcg, true);
3416 } else
3417 BUG();
3418
3419 /* Check if a threshold crossed before adding a new one */
3420 if (thresholds->primary)
3421 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3422
3423 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3424
3425 /* Allocate memory for new array of thresholds */
3426 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3427 GFP_KERNEL);
3428 if (!new) {
3429 ret = -ENOMEM;
3430 goto unlock;
3431 }
3432 new->size = size;
3433
3434 /* Copy thresholds (if any) to new array */
3435 if (thresholds->primary) {
3436 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3437 sizeof(struct mem_cgroup_threshold));
3438 }
3439
3440 /* Add new threshold */
3441 new->entries[size - 1].eventfd = eventfd;
3442 new->entries[size - 1].threshold = threshold;
3443
3444 /* Sort thresholds. Registering of new threshold isn't time-critical */
3445 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3446 compare_thresholds, NULL);
3447
3448 /* Find current threshold */
3449 new->current_threshold = -1;
3450 for (i = 0; i < size; i++) {
3451 if (new->entries[i].threshold <= usage) {
3452 /*
3453 * new->current_threshold will not be used until
3454 * rcu_assign_pointer(), so it's safe to increment
3455 * it here.
3456 */
3457 ++new->current_threshold;
3458 } else
3459 break;
3460 }
3461
3462 /* Free old spare buffer and save old primary buffer as spare */
3463 kfree(thresholds->spare);
3464 thresholds->spare = thresholds->primary;
3465
3466 rcu_assign_pointer(thresholds->primary, new);
3467
3468 /* To be sure that nobody uses thresholds */
3469 synchronize_rcu();
3470
3471 unlock:
3472 mutex_unlock(&memcg->thresholds_lock);
3473
3474 return ret;
3475 }
3476
3477 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3478 struct eventfd_ctx *eventfd, const char *args)
3479 {
3480 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3481 }
3482
3483 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3484 struct eventfd_ctx *eventfd, const char *args)
3485 {
3486 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3487 }
3488
3489 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3490 struct eventfd_ctx *eventfd, enum res_type type)
3491 {
3492 struct mem_cgroup_thresholds *thresholds;
3493 struct mem_cgroup_threshold_ary *new;
3494 unsigned long usage;
3495 int i, j, size;
3496
3497 mutex_lock(&memcg->thresholds_lock);
3498
3499 if (type == _MEM) {
3500 thresholds = &memcg->thresholds;
3501 usage = mem_cgroup_usage(memcg, false);
3502 } else if (type == _MEMSWAP) {
3503 thresholds = &memcg->memsw_thresholds;
3504 usage = mem_cgroup_usage(memcg, true);
3505 } else
3506 BUG();
3507
3508 if (!thresholds->primary)
3509 goto unlock;
3510
3511 /* Check if a threshold crossed before removing */
3512 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3513
3514 /* Calculate new number of threshold */
3515 size = 0;
3516 for (i = 0; i < thresholds->primary->size; i++) {
3517 if (thresholds->primary->entries[i].eventfd != eventfd)
3518 size++;
3519 }
3520
3521 new = thresholds->spare;
3522
3523 /* Set thresholds array to NULL if we don't have thresholds */
3524 if (!size) {
3525 kfree(new);
3526 new = NULL;
3527 goto swap_buffers;
3528 }
3529
3530 new->size = size;
3531
3532 /* Copy thresholds and find current threshold */
3533 new->current_threshold = -1;
3534 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3535 if (thresholds->primary->entries[i].eventfd == eventfd)
3536 continue;
3537
3538 new->entries[j] = thresholds->primary->entries[i];
3539 if (new->entries[j].threshold <= usage) {
3540 /*
3541 * new->current_threshold will not be used
3542 * until rcu_assign_pointer(), so it's safe to increment
3543 * it here.
3544 */
3545 ++new->current_threshold;
3546 }
3547 j++;
3548 }
3549
3550 swap_buffers:
3551 /* Swap primary and spare array */
3552 thresholds->spare = thresholds->primary;
3553
3554 rcu_assign_pointer(thresholds->primary, new);
3555
3556 /* To be sure that nobody uses thresholds */
3557 synchronize_rcu();
3558
3559 /* If all events are unregistered, free the spare array */
3560 if (!new) {
3561 kfree(thresholds->spare);
3562 thresholds->spare = NULL;
3563 }
3564 unlock:
3565 mutex_unlock(&memcg->thresholds_lock);
3566 }
3567
3568 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3569 struct eventfd_ctx *eventfd)
3570 {
3571 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3572 }
3573
3574 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3575 struct eventfd_ctx *eventfd)
3576 {
3577 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3578 }
3579
3580 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3581 struct eventfd_ctx *eventfd, const char *args)
3582 {
3583 struct mem_cgroup_eventfd_list *event;
3584
3585 event = kmalloc(sizeof(*event), GFP_KERNEL);
3586 if (!event)
3587 return -ENOMEM;
3588
3589 spin_lock(&memcg_oom_lock);
3590
3591 event->eventfd = eventfd;
3592 list_add(&event->list, &memcg->oom_notify);
3593
3594 /* already in OOM ? */
3595 if (memcg->under_oom)
3596 eventfd_signal(eventfd, 1);
3597 spin_unlock(&memcg_oom_lock);
3598
3599 return 0;
3600 }
3601
3602 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3603 struct eventfd_ctx *eventfd)
3604 {
3605 struct mem_cgroup_eventfd_list *ev, *tmp;
3606
3607 spin_lock(&memcg_oom_lock);
3608
3609 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3610 if (ev->eventfd == eventfd) {
3611 list_del(&ev->list);
3612 kfree(ev);
3613 }
3614 }
3615
3616 spin_unlock(&memcg_oom_lock);
3617 }
3618
3619 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3620 {
3621 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3622
3623 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3624 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3625 return 0;
3626 }
3627
3628 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3629 struct cftype *cft, u64 val)
3630 {
3631 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3632
3633 /* cannot set to root cgroup and only 0 and 1 are allowed */
3634 if (!css->parent || !((val == 0) || (val == 1)))
3635 return -EINVAL;
3636
3637 memcg->oom_kill_disable = val;
3638 if (!val)
3639 memcg_oom_recover(memcg);
3640
3641 return 0;
3642 }
3643
3644 #ifdef CONFIG_CGROUP_WRITEBACK
3645
3646 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3647 {
3648 return &memcg->cgwb_list;
3649 }
3650
3651 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3652 {
3653 return wb_domain_init(&memcg->cgwb_domain, gfp);
3654 }
3655
3656 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3657 {
3658 wb_domain_exit(&memcg->cgwb_domain);
3659 }
3660
3661 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3662 {
3663 wb_domain_size_changed(&memcg->cgwb_domain);
3664 }
3665
3666 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3667 {
3668 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3669
3670 if (!memcg->css.parent)
3671 return NULL;
3672
3673 return &memcg->cgwb_domain;
3674 }
3675
3676 /**
3677 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3678 * @wb: bdi_writeback in question
3679 * @pfilepages: out parameter for number of file pages
3680 * @pheadroom: out parameter for number of allocatable pages according to memcg
3681 * @pdirty: out parameter for number of dirty pages
3682 * @pwriteback: out parameter for number of pages under writeback
3683 *
3684 * Determine the numbers of file, headroom, dirty, and writeback pages in
3685 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3686 * is a bit more involved.
3687 *
3688 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3689 * headroom is calculated as the lowest headroom of itself and the
3690 * ancestors. Note that this doesn't consider the actual amount of
3691 * available memory in the system. The caller should further cap
3692 * *@pheadroom accordingly.
3693 */
3694 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3695 unsigned long *pheadroom, unsigned long *pdirty,
3696 unsigned long *pwriteback)
3697 {
3698 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3699 struct mem_cgroup *parent;
3700
3701 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3702
3703 /* this should eventually include NR_UNSTABLE_NFS */
3704 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3705 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3706 (1 << LRU_ACTIVE_FILE));
3707 *pheadroom = PAGE_COUNTER_MAX;
3708
3709 while ((parent = parent_mem_cgroup(memcg))) {
3710 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3711 unsigned long used = page_counter_read(&memcg->memory);
3712
3713 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3714 memcg = parent;
3715 }
3716 }
3717
3718 #else /* CONFIG_CGROUP_WRITEBACK */
3719
3720 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3721 {
3722 return 0;
3723 }
3724
3725 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3726 {
3727 }
3728
3729 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3730 {
3731 }
3732
3733 #endif /* CONFIG_CGROUP_WRITEBACK */
3734
3735 /*
3736 * DO NOT USE IN NEW FILES.
3737 *
3738 * "cgroup.event_control" implementation.
3739 *
3740 * This is way over-engineered. It tries to support fully configurable
3741 * events for each user. Such level of flexibility is completely
3742 * unnecessary especially in the light of the planned unified hierarchy.
3743 *
3744 * Please deprecate this and replace with something simpler if at all
3745 * possible.
3746 */
3747
3748 /*
3749 * Unregister event and free resources.
3750 *
3751 * Gets called from workqueue.
3752 */
3753 static void memcg_event_remove(struct work_struct *work)
3754 {
3755 struct mem_cgroup_event *event =
3756 container_of(work, struct mem_cgroup_event, remove);
3757 struct mem_cgroup *memcg = event->memcg;
3758
3759 remove_wait_queue(event->wqh, &event->wait);
3760
3761 event->unregister_event(memcg, event->eventfd);
3762
3763 /* Notify userspace the event is going away. */
3764 eventfd_signal(event->eventfd, 1);
3765
3766 eventfd_ctx_put(event->eventfd);
3767 kfree(event);
3768 css_put(&memcg->css);
3769 }
3770
3771 /*
3772 * Gets called on POLLHUP on eventfd when user closes it.
3773 *
3774 * Called with wqh->lock held and interrupts disabled.
3775 */
3776 static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3777 int sync, void *key)
3778 {
3779 struct mem_cgroup_event *event =
3780 container_of(wait, struct mem_cgroup_event, wait);
3781 struct mem_cgroup *memcg = event->memcg;
3782 unsigned long flags = (unsigned long)key;
3783
3784 if (flags & POLLHUP) {
3785 /*
3786 * If the event has been detached at cgroup removal, we
3787 * can simply return knowing the other side will cleanup
3788 * for us.
3789 *
3790 * We can't race against event freeing since the other
3791 * side will require wqh->lock via remove_wait_queue(),
3792 * which we hold.
3793 */
3794 spin_lock(&memcg->event_list_lock);
3795 if (!list_empty(&event->list)) {
3796 list_del_init(&event->list);
3797 /*
3798 * We are in atomic context, but cgroup_event_remove()
3799 * may sleep, so we have to call it in workqueue.
3800 */
3801 schedule_work(&event->remove);
3802 }
3803 spin_unlock(&memcg->event_list_lock);
3804 }
3805
3806 return 0;
3807 }
3808
3809 static void memcg_event_ptable_queue_proc(struct file *file,
3810 wait_queue_head_t *wqh, poll_table *pt)
3811 {
3812 struct mem_cgroup_event *event =
3813 container_of(pt, struct mem_cgroup_event, pt);
3814
3815 event->wqh = wqh;
3816 add_wait_queue(wqh, &event->wait);
3817 }
3818
3819 /*
3820 * DO NOT USE IN NEW FILES.
3821 *
3822 * Parse input and register new cgroup event handler.
3823 *
3824 * Input must be in format '<event_fd> <control_fd> <args>'.
3825 * Interpretation of args is defined by control file implementation.
3826 */
3827 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3828 char *buf, size_t nbytes, loff_t off)
3829 {
3830 struct cgroup_subsys_state *css = of_css(of);
3831 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3832 struct mem_cgroup_event *event;
3833 struct cgroup_subsys_state *cfile_css;
3834 unsigned int efd, cfd;
3835 struct fd efile;
3836 struct fd cfile;
3837 const char *name;
3838 char *endp;
3839 int ret;
3840
3841 buf = strstrip(buf);
3842
3843 efd = simple_strtoul(buf, &endp, 10);
3844 if (*endp != ' ')
3845 return -EINVAL;
3846 buf = endp + 1;
3847
3848 cfd = simple_strtoul(buf, &endp, 10);
3849 if ((*endp != ' ') && (*endp != '\0'))
3850 return -EINVAL;
3851 buf = endp + 1;
3852
3853 event = kzalloc(sizeof(*event), GFP_KERNEL);
3854 if (!event)
3855 return -ENOMEM;
3856
3857 event->memcg = memcg;
3858 INIT_LIST_HEAD(&event->list);
3859 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3860 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3861 INIT_WORK(&event->remove, memcg_event_remove);
3862
3863 efile = fdget(efd);
3864 if (!efile.file) {
3865 ret = -EBADF;
3866 goto out_kfree;
3867 }
3868
3869 event->eventfd = eventfd_ctx_fileget(efile.file);
3870 if (IS_ERR(event->eventfd)) {
3871 ret = PTR_ERR(event->eventfd);
3872 goto out_put_efile;
3873 }
3874
3875 cfile = fdget(cfd);
3876 if (!cfile.file) {
3877 ret = -EBADF;
3878 goto out_put_eventfd;
3879 }
3880
3881 /* the process need read permission on control file */
3882 /* AV: shouldn't we check that it's been opened for read instead? */
3883 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3884 if (ret < 0)
3885 goto out_put_cfile;
3886
3887 /*
3888 * Determine the event callbacks and set them in @event. This used
3889 * to be done via struct cftype but cgroup core no longer knows
3890 * about these events. The following is crude but the whole thing
3891 * is for compatibility anyway.
3892 *
3893 * DO NOT ADD NEW FILES.
3894 */
3895 name = cfile.file->f_path.dentry->d_name.name;
3896
3897 if (!strcmp(name, "memory.usage_in_bytes")) {
3898 event->register_event = mem_cgroup_usage_register_event;
3899 event->unregister_event = mem_cgroup_usage_unregister_event;
3900 } else if (!strcmp(name, "memory.oom_control")) {
3901 event->register_event = mem_cgroup_oom_register_event;
3902 event->unregister_event = mem_cgroup_oom_unregister_event;
3903 } else if (!strcmp(name, "memory.pressure_level")) {
3904 event->register_event = vmpressure_register_event;
3905 event->unregister_event = vmpressure_unregister_event;
3906 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3907 event->register_event = memsw_cgroup_usage_register_event;
3908 event->unregister_event = memsw_cgroup_usage_unregister_event;
3909 } else {
3910 ret = -EINVAL;
3911 goto out_put_cfile;
3912 }
3913
3914 /*
3915 * Verify @cfile should belong to @css. Also, remaining events are
3916 * automatically removed on cgroup destruction but the removal is
3917 * asynchronous, so take an extra ref on @css.
3918 */
3919 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3920 &memory_cgrp_subsys);
3921 ret = -EINVAL;
3922 if (IS_ERR(cfile_css))
3923 goto out_put_cfile;
3924 if (cfile_css != css) {
3925 css_put(cfile_css);
3926 goto out_put_cfile;
3927 }
3928
3929 ret = event->register_event(memcg, event->eventfd, buf);
3930 if (ret)
3931 goto out_put_css;
3932
3933 efile.file->f_op->poll(efile.file, &event->pt);
3934
3935 spin_lock(&memcg->event_list_lock);
3936 list_add(&event->list, &memcg->event_list);
3937 spin_unlock(&memcg->event_list_lock);
3938
3939 fdput(cfile);
3940 fdput(efile);
3941
3942 return nbytes;
3943
3944 out_put_css:
3945 css_put(css);
3946 out_put_cfile:
3947 fdput(cfile);
3948 out_put_eventfd:
3949 eventfd_ctx_put(event->eventfd);
3950 out_put_efile:
3951 fdput(efile);
3952 out_kfree:
3953 kfree(event);
3954
3955 return ret;
3956 }
3957
3958 static struct cftype mem_cgroup_legacy_files[] = {
3959 {
3960 .name = "usage_in_bytes",
3961 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3962 .read_u64 = mem_cgroup_read_u64,
3963 },
3964 {
3965 .name = "max_usage_in_bytes",
3966 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3967 .write = mem_cgroup_reset,
3968 .read_u64 = mem_cgroup_read_u64,
3969 },
3970 {
3971 .name = "limit_in_bytes",
3972 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3973 .write = mem_cgroup_write,
3974 .read_u64 = mem_cgroup_read_u64,
3975 },
3976 {
3977 .name = "soft_limit_in_bytes",
3978 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3979 .write = mem_cgroup_write,
3980 .read_u64 = mem_cgroup_read_u64,
3981 },
3982 {
3983 .name = "failcnt",
3984 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3985 .write = mem_cgroup_reset,
3986 .read_u64 = mem_cgroup_read_u64,
3987 },
3988 {
3989 .name = "stat",
3990 .seq_show = memcg_stat_show,
3991 },
3992 {
3993 .name = "force_empty",
3994 .write = mem_cgroup_force_empty_write,
3995 },
3996 {
3997 .name = "use_hierarchy",
3998 .write_u64 = mem_cgroup_hierarchy_write,
3999 .read_u64 = mem_cgroup_hierarchy_read,
4000 },
4001 {
4002 .name = "cgroup.event_control", /* XXX: for compat */
4003 .write = memcg_write_event_control,
4004 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4005 },
4006 {
4007 .name = "swappiness",
4008 .read_u64 = mem_cgroup_swappiness_read,
4009 .write_u64 = mem_cgroup_swappiness_write,
4010 },
4011 {
4012 .name = "move_charge_at_immigrate",
4013 .read_u64 = mem_cgroup_move_charge_read,
4014 .write_u64 = mem_cgroup_move_charge_write,
4015 },
4016 {
4017 .name = "oom_control",
4018 .seq_show = mem_cgroup_oom_control_read,
4019 .write_u64 = mem_cgroup_oom_control_write,
4020 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4021 },
4022 {
4023 .name = "pressure_level",
4024 },
4025 #ifdef CONFIG_NUMA
4026 {
4027 .name = "numa_stat",
4028 .seq_show = memcg_numa_stat_show,
4029 },
4030 #endif
4031 {
4032 .name = "kmem.limit_in_bytes",
4033 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4034 .write = mem_cgroup_write,
4035 .read_u64 = mem_cgroup_read_u64,
4036 },
4037 {
4038 .name = "kmem.usage_in_bytes",
4039 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4040 .read_u64 = mem_cgroup_read_u64,
4041 },
4042 {
4043 .name = "kmem.failcnt",
4044 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4045 .write = mem_cgroup_reset,
4046 .read_u64 = mem_cgroup_read_u64,
4047 },
4048 {
4049 .name = "kmem.max_usage_in_bytes",
4050 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4051 .write = mem_cgroup_reset,
4052 .read_u64 = mem_cgroup_read_u64,
4053 },
4054 #ifdef CONFIG_SLABINFO
4055 {
4056 .name = "kmem.slabinfo",
4057 .seq_start = slab_start,
4058 .seq_next = slab_next,
4059 .seq_stop = slab_stop,
4060 .seq_show = memcg_slab_show,
4061 },
4062 #endif
4063 {
4064 .name = "kmem.tcp.limit_in_bytes",
4065 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4066 .write = mem_cgroup_write,
4067 .read_u64 = mem_cgroup_read_u64,
4068 },
4069 {
4070 .name = "kmem.tcp.usage_in_bytes",
4071 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4072 .read_u64 = mem_cgroup_read_u64,
4073 },
4074 {
4075 .name = "kmem.tcp.failcnt",
4076 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4077 .write = mem_cgroup_reset,
4078 .read_u64 = mem_cgroup_read_u64,
4079 },
4080 {
4081 .name = "kmem.tcp.max_usage_in_bytes",
4082 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4083 .write = mem_cgroup_reset,
4084 .read_u64 = mem_cgroup_read_u64,
4085 },
4086 { }, /* terminate */
4087 };
4088
4089 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4090 {
4091 struct mem_cgroup_per_node *pn;
4092 struct mem_cgroup_per_zone *mz;
4093 int zone, tmp = node;
4094 /*
4095 * This routine is called against possible nodes.
4096 * But it's BUG to call kmalloc() against offline node.
4097 *
4098 * TODO: this routine can waste much memory for nodes which will
4099 * never be onlined. It's better to use memory hotplug callback
4100 * function.
4101 */
4102 if (!node_state(node, N_NORMAL_MEMORY))
4103 tmp = -1;
4104 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4105 if (!pn)
4106 return 1;
4107
4108 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4109 mz = &pn->zoneinfo[zone];
4110 lruvec_init(&mz->lruvec);
4111 mz->usage_in_excess = 0;
4112 mz->on_tree = false;
4113 mz->memcg = memcg;
4114 }
4115 memcg->nodeinfo[node] = pn;
4116 return 0;
4117 }
4118
4119 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4120 {
4121 kfree(memcg->nodeinfo[node]);
4122 }
4123
4124 static void mem_cgroup_free(struct mem_cgroup *memcg)
4125 {
4126 int node;
4127
4128 memcg_wb_domain_exit(memcg);
4129 for_each_node(node)
4130 free_mem_cgroup_per_zone_info(memcg, node);
4131 free_percpu(memcg->stat);
4132 kfree(memcg);
4133 }
4134
4135 static struct mem_cgroup *mem_cgroup_alloc(void)
4136 {
4137 struct mem_cgroup *memcg;
4138 size_t size;
4139 int node;
4140
4141 size = sizeof(struct mem_cgroup);
4142 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4143
4144 memcg = kzalloc(size, GFP_KERNEL);
4145 if (!memcg)
4146 return NULL;
4147
4148 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4149 if (!memcg->stat)
4150 goto fail;
4151
4152 for_each_node(node)
4153 if (alloc_mem_cgroup_per_zone_info(memcg, node))
4154 goto fail;
4155
4156 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4157 goto fail;
4158
4159 INIT_WORK(&memcg->high_work, high_work_func);
4160 memcg->last_scanned_node = MAX_NUMNODES;
4161 INIT_LIST_HEAD(&memcg->oom_notify);
4162 mutex_init(&memcg->thresholds_lock);
4163 spin_lock_init(&memcg->move_lock);
4164 vmpressure_init(&memcg->vmpressure);
4165 INIT_LIST_HEAD(&memcg->event_list);
4166 spin_lock_init(&memcg->event_list_lock);
4167 memcg->socket_pressure = jiffies;
4168 #ifndef CONFIG_SLOB
4169 memcg->kmemcg_id = -1;
4170 #endif
4171 #ifdef CONFIG_CGROUP_WRITEBACK
4172 INIT_LIST_HEAD(&memcg->cgwb_list);
4173 #endif
4174 return memcg;
4175 fail:
4176 mem_cgroup_free(memcg);
4177 return NULL;
4178 }
4179
4180 static struct cgroup_subsys_state * __ref
4181 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4182 {
4183 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4184 struct mem_cgroup *memcg;
4185 long error = -ENOMEM;
4186
4187 memcg = mem_cgroup_alloc();
4188 if (!memcg)
4189 return ERR_PTR(error);
4190
4191 memcg->high = PAGE_COUNTER_MAX;
4192 memcg->soft_limit = PAGE_COUNTER_MAX;
4193 if (parent) {
4194 memcg->swappiness = mem_cgroup_swappiness(parent);
4195 memcg->oom_kill_disable = parent->oom_kill_disable;
4196 }
4197 if (parent && parent->use_hierarchy) {
4198 memcg->use_hierarchy = true;
4199 page_counter_init(&memcg->memory, &parent->memory);
4200 page_counter_init(&memcg->swap, &parent->swap);
4201 page_counter_init(&memcg->memsw, &parent->memsw);
4202 page_counter_init(&memcg->kmem, &parent->kmem);
4203 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4204 } else {
4205 page_counter_init(&memcg->memory, NULL);
4206 page_counter_init(&memcg->swap, NULL);
4207 page_counter_init(&memcg->memsw, NULL);
4208 page_counter_init(&memcg->kmem, NULL);
4209 page_counter_init(&memcg->tcpmem, NULL);
4210 /*
4211 * Deeper hierachy with use_hierarchy == false doesn't make
4212 * much sense so let cgroup subsystem know about this
4213 * unfortunate state in our controller.
4214 */
4215 if (parent != root_mem_cgroup)
4216 memory_cgrp_subsys.broken_hierarchy = true;
4217 }
4218
4219 /* The following stuff does not apply to the root */
4220 if (!parent) {
4221 root_mem_cgroup = memcg;
4222 return &memcg->css;
4223 }
4224
4225 error = memcg_propagate_kmem(parent, memcg);
4226 if (error)
4227 goto fail;
4228
4229 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4230 static_branch_inc(&memcg_sockets_enabled_key);
4231
4232 return &memcg->css;
4233 fail:
4234 mem_cgroup_free(memcg);
4235 return NULL;
4236 }
4237
4238 static int
4239 mem_cgroup_css_online(struct cgroup_subsys_state *css)
4240 {
4241 if (css->id > MEM_CGROUP_ID_MAX)
4242 return -ENOSPC;
4243
4244 return 0;
4245 }
4246
4247 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4248 {
4249 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4250 struct mem_cgroup_event *event, *tmp;
4251
4252 /*
4253 * Unregister events and notify userspace.
4254 * Notify userspace about cgroup removing only after rmdir of cgroup
4255 * directory to avoid race between userspace and kernelspace.
4256 */
4257 spin_lock(&memcg->event_list_lock);
4258 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4259 list_del_init(&event->list);
4260 schedule_work(&event->remove);
4261 }
4262 spin_unlock(&memcg->event_list_lock);
4263
4264 memcg_offline_kmem(memcg);
4265 wb_memcg_offline(memcg);
4266 }
4267
4268 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4269 {
4270 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4271
4272 invalidate_reclaim_iterators(memcg);
4273 }
4274
4275 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4276 {
4277 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4278
4279 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4280 static_branch_dec(&memcg_sockets_enabled_key);
4281
4282 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4283 static_branch_dec(&memcg_sockets_enabled_key);
4284
4285 vmpressure_cleanup(&memcg->vmpressure);
4286 cancel_work_sync(&memcg->high_work);
4287 mem_cgroup_remove_from_trees(memcg);
4288 memcg_free_kmem(memcg);
4289 mem_cgroup_free(memcg);
4290 }
4291
4292 /**
4293 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4294 * @css: the target css
4295 *
4296 * Reset the states of the mem_cgroup associated with @css. This is
4297 * invoked when the userland requests disabling on the default hierarchy
4298 * but the memcg is pinned through dependency. The memcg should stop
4299 * applying policies and should revert to the vanilla state as it may be
4300 * made visible again.
4301 *
4302 * The current implementation only resets the essential configurations.
4303 * This needs to be expanded to cover all the visible parts.
4304 */
4305 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4306 {
4307 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4308
4309 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
4310 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
4311 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
4312 memcg->low = 0;
4313 memcg->high = PAGE_COUNTER_MAX;
4314 memcg->soft_limit = PAGE_COUNTER_MAX;
4315 memcg_wb_domain_size_changed(memcg);
4316 }
4317
4318 #ifdef CONFIG_MMU
4319 /* Handlers for move charge at task migration. */
4320 static int mem_cgroup_do_precharge(unsigned long count)
4321 {
4322 int ret;
4323
4324 /* Try a single bulk charge without reclaim first, kswapd may wake */
4325 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4326 if (!ret) {
4327 mc.precharge += count;
4328 return ret;
4329 }
4330
4331 /* Try charges one by one with reclaim */
4332 while (count--) {
4333 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
4334 if (ret)
4335 return ret;
4336 mc.precharge++;
4337 cond_resched();
4338 }
4339 return 0;
4340 }
4341
4342 /**
4343 * get_mctgt_type - get target type of moving charge
4344 * @vma: the vma the pte to be checked belongs
4345 * @addr: the address corresponding to the pte to be checked
4346 * @ptent: the pte to be checked
4347 * @target: the pointer the target page or swap ent will be stored(can be NULL)
4348 *
4349 * Returns
4350 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4351 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4352 * move charge. if @target is not NULL, the page is stored in target->page
4353 * with extra refcnt got(Callers should handle it).
4354 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4355 * target for charge migration. if @target is not NULL, the entry is stored
4356 * in target->ent.
4357 *
4358 * Called with pte lock held.
4359 */
4360 union mc_target {
4361 struct page *page;
4362 swp_entry_t ent;
4363 };
4364
4365 enum mc_target_type {
4366 MC_TARGET_NONE = 0,
4367 MC_TARGET_PAGE,
4368 MC_TARGET_SWAP,
4369 };
4370
4371 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4372 unsigned long addr, pte_t ptent)
4373 {
4374 struct page *page = vm_normal_page(vma, addr, ptent);
4375
4376 if (!page || !page_mapped(page))
4377 return NULL;
4378 if (PageAnon(page)) {
4379 if (!(mc.flags & MOVE_ANON))
4380 return NULL;
4381 } else {
4382 if (!(mc.flags & MOVE_FILE))
4383 return NULL;
4384 }
4385 if (!get_page_unless_zero(page))
4386 return NULL;
4387
4388 return page;
4389 }
4390
4391 #ifdef CONFIG_SWAP
4392 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4393 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4394 {
4395 struct page *page = NULL;
4396 swp_entry_t ent = pte_to_swp_entry(ptent);
4397
4398 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4399 return NULL;
4400 /*
4401 * Because lookup_swap_cache() updates some statistics counter,
4402 * we call find_get_page() with swapper_space directly.
4403 */
4404 page = find_get_page(swap_address_space(ent), ent.val);
4405 if (do_memsw_account())
4406 entry->val = ent.val;
4407
4408 return page;
4409 }
4410 #else
4411 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4412 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4413 {
4414 return NULL;
4415 }
4416 #endif
4417
4418 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4419 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4420 {
4421 struct page *page = NULL;
4422 struct address_space *mapping;
4423 pgoff_t pgoff;
4424
4425 if (!vma->vm_file) /* anonymous vma */
4426 return NULL;
4427 if (!(mc.flags & MOVE_FILE))
4428 return NULL;
4429
4430 mapping = vma->vm_file->f_mapping;
4431 pgoff = linear_page_index(vma, addr);
4432
4433 /* page is moved even if it's not RSS of this task(page-faulted). */
4434 #ifdef CONFIG_SWAP
4435 /* shmem/tmpfs may report page out on swap: account for that too. */
4436 if (shmem_mapping(mapping)) {
4437 page = find_get_entry(mapping, pgoff);
4438 if (radix_tree_exceptional_entry(page)) {
4439 swp_entry_t swp = radix_to_swp_entry(page);
4440 if (do_memsw_account())
4441 *entry = swp;
4442 page = find_get_page(swap_address_space(swp), swp.val);
4443 }
4444 } else
4445 page = find_get_page(mapping, pgoff);
4446 #else
4447 page = find_get_page(mapping, pgoff);
4448 #endif
4449 return page;
4450 }
4451
4452 /**
4453 * mem_cgroup_move_account - move account of the page
4454 * @page: the page
4455 * @nr_pages: number of regular pages (>1 for huge pages)
4456 * @from: mem_cgroup which the page is moved from.
4457 * @to: mem_cgroup which the page is moved to. @from != @to.
4458 *
4459 * The caller must make sure the page is not on LRU (isolate_page() is useful.)
4460 *
4461 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4462 * from old cgroup.
4463 */
4464 static int mem_cgroup_move_account(struct page *page,
4465 bool compound,
4466 struct mem_cgroup *from,
4467 struct mem_cgroup *to)
4468 {
4469 unsigned long flags;
4470 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4471 int ret;
4472 bool anon;
4473
4474 VM_BUG_ON(from == to);
4475 VM_BUG_ON_PAGE(PageLRU(page), page);
4476 VM_BUG_ON(compound && !PageTransHuge(page));
4477
4478 /*
4479 * Prevent mem_cgroup_replace_page() from looking at
4480 * page->mem_cgroup of its source page while we change it.
4481 */
4482 ret = -EBUSY;
4483 if (!trylock_page(page))
4484 goto out;
4485
4486 ret = -EINVAL;
4487 if (page->mem_cgroup != from)
4488 goto out_unlock;
4489
4490 anon = PageAnon(page);
4491
4492 spin_lock_irqsave(&from->move_lock, flags);
4493
4494 if (!anon && page_mapped(page)) {
4495 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4496 nr_pages);
4497 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4498 nr_pages);
4499 }
4500
4501 /*
4502 * move_lock grabbed above and caller set from->moving_account, so
4503 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4504 * So mapping should be stable for dirty pages.
4505 */
4506 if (!anon && PageDirty(page)) {
4507 struct address_space *mapping = page_mapping(page);
4508
4509 if (mapping_cap_account_dirty(mapping)) {
4510 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4511 nr_pages);
4512 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4513 nr_pages);
4514 }
4515 }
4516
4517 if (PageWriteback(page)) {
4518 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4519 nr_pages);
4520 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4521 nr_pages);
4522 }
4523
4524 /*
4525 * It is safe to change page->mem_cgroup here because the page
4526 * is referenced, charged, and isolated - we can't race with
4527 * uncharging, charging, migration, or LRU putback.
4528 */
4529
4530 /* caller should have done css_get */
4531 page->mem_cgroup = to;
4532 spin_unlock_irqrestore(&from->move_lock, flags);
4533
4534 ret = 0;
4535
4536 local_irq_disable();
4537 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4538 memcg_check_events(to, page);
4539 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4540 memcg_check_events(from, page);
4541 local_irq_enable();
4542 out_unlock:
4543 unlock_page(page);
4544 out:
4545 return ret;
4546 }
4547
4548 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4549 unsigned long addr, pte_t ptent, union mc_target *target)
4550 {
4551 struct page *page = NULL;
4552 enum mc_target_type ret = MC_TARGET_NONE;
4553 swp_entry_t ent = { .val = 0 };
4554
4555 if (pte_present(ptent))
4556 page = mc_handle_present_pte(vma, addr, ptent);
4557 else if (is_swap_pte(ptent))
4558 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4559 else if (pte_none(ptent))
4560 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4561
4562 if (!page && !ent.val)
4563 return ret;
4564 if (page) {
4565 /*
4566 * Do only loose check w/o serialization.
4567 * mem_cgroup_move_account() checks the page is valid or
4568 * not under LRU exclusion.
4569 */
4570 if (page->mem_cgroup == mc.from) {
4571 ret = MC_TARGET_PAGE;
4572 if (target)
4573 target->page = page;
4574 }
4575 if (!ret || !target)
4576 put_page(page);
4577 }
4578 /* There is a swap entry and a page doesn't exist or isn't charged */
4579 if (ent.val && !ret &&
4580 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4581 ret = MC_TARGET_SWAP;
4582 if (target)
4583 target->ent = ent;
4584 }
4585 return ret;
4586 }
4587
4588 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4589 /*
4590 * We don't consider swapping or file mapped pages because THP does not
4591 * support them for now.
4592 * Caller should make sure that pmd_trans_huge(pmd) is true.
4593 */
4594 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4595 unsigned long addr, pmd_t pmd, union mc_target *target)
4596 {
4597 struct page *page = NULL;
4598 enum mc_target_type ret = MC_TARGET_NONE;
4599
4600 page = pmd_page(pmd);
4601 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4602 if (!(mc.flags & MOVE_ANON))
4603 return ret;
4604 if (page->mem_cgroup == mc.from) {
4605 ret = MC_TARGET_PAGE;
4606 if (target) {
4607 get_page(page);
4608 target->page = page;
4609 }
4610 }
4611 return ret;
4612 }
4613 #else
4614 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4615 unsigned long addr, pmd_t pmd, union mc_target *target)
4616 {
4617 return MC_TARGET_NONE;
4618 }
4619 #endif
4620
4621 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4622 unsigned long addr, unsigned long end,
4623 struct mm_walk *walk)
4624 {
4625 struct vm_area_struct *vma = walk->vma;
4626 pte_t *pte;
4627 spinlock_t *ptl;
4628
4629 if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
4630 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4631 mc.precharge += HPAGE_PMD_NR;
4632 spin_unlock(ptl);
4633 return 0;
4634 }
4635
4636 if (pmd_trans_unstable(pmd))
4637 return 0;
4638 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4639 for (; addr != end; pte++, addr += PAGE_SIZE)
4640 if (get_mctgt_type(vma, addr, *pte, NULL))
4641 mc.precharge++; /* increment precharge temporarily */
4642 pte_unmap_unlock(pte - 1, ptl);
4643 cond_resched();
4644
4645 return 0;
4646 }
4647
4648 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4649 {
4650 unsigned long precharge;
4651
4652 struct mm_walk mem_cgroup_count_precharge_walk = {
4653 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4654 .mm = mm,
4655 };
4656 down_read(&mm->mmap_sem);
4657 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
4658 up_read(&mm->mmap_sem);
4659
4660 precharge = mc.precharge;
4661 mc.precharge = 0;
4662
4663 return precharge;
4664 }
4665
4666 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4667 {
4668 unsigned long precharge = mem_cgroup_count_precharge(mm);
4669
4670 VM_BUG_ON(mc.moving_task);
4671 mc.moving_task = current;
4672 return mem_cgroup_do_precharge(precharge);
4673 }
4674
4675 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4676 static void __mem_cgroup_clear_mc(void)
4677 {
4678 struct mem_cgroup *from = mc.from;
4679 struct mem_cgroup *to = mc.to;
4680
4681 /* we must uncharge all the leftover precharges from mc.to */
4682 if (mc.precharge) {
4683 cancel_charge(mc.to, mc.precharge);
4684 mc.precharge = 0;
4685 }
4686 /*
4687 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4688 * we must uncharge here.
4689 */
4690 if (mc.moved_charge) {
4691 cancel_charge(mc.from, mc.moved_charge);
4692 mc.moved_charge = 0;
4693 }
4694 /* we must fixup refcnts and charges */
4695 if (mc.moved_swap) {
4696 /* uncharge swap account from the old cgroup */
4697 if (!mem_cgroup_is_root(mc.from))
4698 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4699
4700 /*
4701 * we charged both to->memory and to->memsw, so we
4702 * should uncharge to->memory.
4703 */
4704 if (!mem_cgroup_is_root(mc.to))
4705 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4706
4707 css_put_many(&mc.from->css, mc.moved_swap);
4708
4709 /* we've already done css_get(mc.to) */
4710 mc.moved_swap = 0;
4711 }
4712 memcg_oom_recover(from);
4713 memcg_oom_recover(to);
4714 wake_up_all(&mc.waitq);
4715 }
4716
4717 static void mem_cgroup_clear_mc(void)
4718 {
4719 /*
4720 * we must clear moving_task before waking up waiters at the end of
4721 * task migration.
4722 */
4723 mc.moving_task = NULL;
4724 __mem_cgroup_clear_mc();
4725 spin_lock(&mc.lock);
4726 mc.from = NULL;
4727 mc.to = NULL;
4728 spin_unlock(&mc.lock);
4729 }
4730
4731 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4732 {
4733 struct cgroup_subsys_state *css;
4734 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
4735 struct mem_cgroup *from;
4736 struct task_struct *leader, *p;
4737 struct mm_struct *mm;
4738 unsigned long move_flags;
4739 int ret = 0;
4740
4741 /* charge immigration isn't supported on the default hierarchy */
4742 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4743 return 0;
4744
4745 /*
4746 * Multi-process migrations only happen on the default hierarchy
4747 * where charge immigration is not used. Perform charge
4748 * immigration if @tset contains a leader and whine if there are
4749 * multiple.
4750 */
4751 p = NULL;
4752 cgroup_taskset_for_each_leader(leader, css, tset) {
4753 WARN_ON_ONCE(p);
4754 p = leader;
4755 memcg = mem_cgroup_from_css(css);
4756 }
4757 if (!p)
4758 return 0;
4759
4760 /*
4761 * We are now commited to this value whatever it is. Changes in this
4762 * tunable will only affect upcoming migrations, not the current one.
4763 * So we need to save it, and keep it going.
4764 */
4765 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4766 if (!move_flags)
4767 return 0;
4768
4769 from = mem_cgroup_from_task(p);
4770
4771 VM_BUG_ON(from == memcg);
4772
4773 mm = get_task_mm(p);
4774 if (!mm)
4775 return 0;
4776 /* We move charges only when we move a owner of the mm */
4777 if (mm->owner == p) {
4778 VM_BUG_ON(mc.from);
4779 VM_BUG_ON(mc.to);
4780 VM_BUG_ON(mc.precharge);
4781 VM_BUG_ON(mc.moved_charge);
4782 VM_BUG_ON(mc.moved_swap);
4783
4784 spin_lock(&mc.lock);
4785 mc.from = from;
4786 mc.to = memcg;
4787 mc.flags = move_flags;
4788 spin_unlock(&mc.lock);
4789 /* We set mc.moving_task later */
4790
4791 ret = mem_cgroup_precharge_mc(mm);
4792 if (ret)
4793 mem_cgroup_clear_mc();
4794 }
4795 mmput(mm);
4796 return ret;
4797 }
4798
4799 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4800 {
4801 if (mc.to)
4802 mem_cgroup_clear_mc();
4803 }
4804
4805 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4806 unsigned long addr, unsigned long end,
4807 struct mm_walk *walk)
4808 {
4809 int ret = 0;
4810 struct vm_area_struct *vma = walk->vma;
4811 pte_t *pte;
4812 spinlock_t *ptl;
4813 enum mc_target_type target_type;
4814 union mc_target target;
4815 struct page *page;
4816
4817 if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
4818 if (mc.precharge < HPAGE_PMD_NR) {
4819 spin_unlock(ptl);
4820 return 0;
4821 }
4822 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4823 if (target_type == MC_TARGET_PAGE) {
4824 page = target.page;
4825 if (!isolate_lru_page(page)) {
4826 if (!mem_cgroup_move_account(page, true,
4827 mc.from, mc.to)) {
4828 mc.precharge -= HPAGE_PMD_NR;
4829 mc.moved_charge += HPAGE_PMD_NR;
4830 }
4831 putback_lru_page(page);
4832 }
4833 put_page(page);
4834 }
4835 spin_unlock(ptl);
4836 return 0;
4837 }
4838
4839 if (pmd_trans_unstable(pmd))
4840 return 0;
4841 retry:
4842 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4843 for (; addr != end; addr += PAGE_SIZE) {
4844 pte_t ptent = *(pte++);
4845 swp_entry_t ent;
4846
4847 if (!mc.precharge)
4848 break;
4849
4850 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4851 case MC_TARGET_PAGE:
4852 page = target.page;
4853 /*
4854 * We can have a part of the split pmd here. Moving it
4855 * can be done but it would be too convoluted so simply
4856 * ignore such a partial THP and keep it in original
4857 * memcg. There should be somebody mapping the head.
4858 */
4859 if (PageTransCompound(page))
4860 goto put;
4861 if (isolate_lru_page(page))
4862 goto put;
4863 if (!mem_cgroup_move_account(page, false,
4864 mc.from, mc.to)) {
4865 mc.precharge--;
4866 /* we uncharge from mc.from later. */
4867 mc.moved_charge++;
4868 }
4869 putback_lru_page(page);
4870 put: /* get_mctgt_type() gets the page */
4871 put_page(page);
4872 break;
4873 case MC_TARGET_SWAP:
4874 ent = target.ent;
4875 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4876 mc.precharge--;
4877 /* we fixup refcnts and charges later. */
4878 mc.moved_swap++;
4879 }
4880 break;
4881 default:
4882 break;
4883 }
4884 }
4885 pte_unmap_unlock(pte - 1, ptl);
4886 cond_resched();
4887
4888 if (addr != end) {
4889 /*
4890 * We have consumed all precharges we got in can_attach().
4891 * We try charge one by one, but don't do any additional
4892 * charges to mc.to if we have failed in charge once in attach()
4893 * phase.
4894 */
4895 ret = mem_cgroup_do_precharge(1);
4896 if (!ret)
4897 goto retry;
4898 }
4899
4900 return ret;
4901 }
4902
4903 static void mem_cgroup_move_charge(struct mm_struct *mm)
4904 {
4905 struct mm_walk mem_cgroup_move_charge_walk = {
4906 .pmd_entry = mem_cgroup_move_charge_pte_range,
4907 .mm = mm,
4908 };
4909
4910 lru_add_drain_all();
4911 /*
4912 * Signal mem_cgroup_begin_page_stat() to take the memcg's
4913 * move_lock while we're moving its pages to another memcg.
4914 * Then wait for already started RCU-only updates to finish.
4915 */
4916 atomic_inc(&mc.from->moving_account);
4917 synchronize_rcu();
4918 retry:
4919 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
4920 /*
4921 * Someone who are holding the mmap_sem might be waiting in
4922 * waitq. So we cancel all extra charges, wake up all waiters,
4923 * and retry. Because we cancel precharges, we might not be able
4924 * to move enough charges, but moving charge is a best-effort
4925 * feature anyway, so it wouldn't be a big problem.
4926 */
4927 __mem_cgroup_clear_mc();
4928 cond_resched();
4929 goto retry;
4930 }
4931 /*
4932 * When we have consumed all precharges and failed in doing
4933 * additional charge, the page walk just aborts.
4934 */
4935 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
4936 up_read(&mm->mmap_sem);
4937 atomic_dec(&mc.from->moving_account);
4938 }
4939
4940 static void mem_cgroup_move_task(struct cgroup_taskset *tset)
4941 {
4942 struct cgroup_subsys_state *css;
4943 struct task_struct *p = cgroup_taskset_first(tset, &css);
4944 struct mm_struct *mm = get_task_mm(p);
4945
4946 if (mm) {
4947 if (mc.to)
4948 mem_cgroup_move_charge(mm);
4949 mmput(mm);
4950 }
4951 if (mc.to)
4952 mem_cgroup_clear_mc();
4953 }
4954 #else /* !CONFIG_MMU */
4955 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4956 {
4957 return 0;
4958 }
4959 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4960 {
4961 }
4962 static void mem_cgroup_move_task(struct cgroup_taskset *tset)
4963 {
4964 }
4965 #endif
4966
4967 /*
4968 * Cgroup retains root cgroups across [un]mount cycles making it necessary
4969 * to verify whether we're attached to the default hierarchy on each mount
4970 * attempt.
4971 */
4972 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
4973 {
4974 /*
4975 * use_hierarchy is forced on the default hierarchy. cgroup core
4976 * guarantees that @root doesn't have any children, so turning it
4977 * on for the root memcg is enough.
4978 */
4979 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4980 root_mem_cgroup->use_hierarchy = true;
4981 else
4982 root_mem_cgroup->use_hierarchy = false;
4983 }
4984
4985 static u64 memory_current_read(struct cgroup_subsys_state *css,
4986 struct cftype *cft)
4987 {
4988 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4989
4990 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
4991 }
4992
4993 static int memory_low_show(struct seq_file *m, void *v)
4994 {
4995 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
4996 unsigned long low = READ_ONCE(memcg->low);
4997
4998 if (low == PAGE_COUNTER_MAX)
4999 seq_puts(m, "max\n");
5000 else
5001 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5002
5003 return 0;
5004 }
5005
5006 static ssize_t memory_low_write(struct kernfs_open_file *of,
5007 char *buf, size_t nbytes, loff_t off)
5008 {
5009 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5010 unsigned long low;
5011 int err;
5012
5013 buf = strstrip(buf);
5014 err = page_counter_memparse(buf, "max", &low);
5015 if (err)
5016 return err;
5017
5018 memcg->low = low;
5019
5020 return nbytes;
5021 }
5022
5023 static int memory_high_show(struct seq_file *m, void *v)
5024 {
5025 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5026 unsigned long high = READ_ONCE(memcg->high);
5027
5028 if (high == PAGE_COUNTER_MAX)
5029 seq_puts(m, "max\n");
5030 else
5031 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5032
5033 return 0;
5034 }
5035
5036 static ssize_t memory_high_write(struct kernfs_open_file *of,
5037 char *buf, size_t nbytes, loff_t off)
5038 {
5039 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5040 unsigned long high;
5041 int err;
5042
5043 buf = strstrip(buf);
5044 err = page_counter_memparse(buf, "max", &high);
5045 if (err)
5046 return err;
5047
5048 memcg->high = high;
5049
5050 memcg_wb_domain_size_changed(memcg);
5051 return nbytes;
5052 }
5053
5054 static int memory_max_show(struct seq_file *m, void *v)
5055 {
5056 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5057 unsigned long max = READ_ONCE(memcg->memory.limit);
5058
5059 if (max == PAGE_COUNTER_MAX)
5060 seq_puts(m, "max\n");
5061 else
5062 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5063
5064 return 0;
5065 }
5066
5067 static ssize_t memory_max_write(struct kernfs_open_file *of,
5068 char *buf, size_t nbytes, loff_t off)
5069 {
5070 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5071 unsigned long max;
5072 int err;
5073
5074 buf = strstrip(buf);
5075 err = page_counter_memparse(buf, "max", &max);
5076 if (err)
5077 return err;
5078
5079 err = mem_cgroup_resize_limit(memcg, max);
5080 if (err)
5081 return err;
5082
5083 memcg_wb_domain_size_changed(memcg);
5084 return nbytes;
5085 }
5086
5087 static int memory_events_show(struct seq_file *m, void *v)
5088 {
5089 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5090
5091 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5092 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5093 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5094 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5095
5096 return 0;
5097 }
5098
5099 static struct cftype memory_files[] = {
5100 {
5101 .name = "current",
5102 .flags = CFTYPE_NOT_ON_ROOT,
5103 .read_u64 = memory_current_read,
5104 },
5105 {
5106 .name = "low",
5107 .flags = CFTYPE_NOT_ON_ROOT,
5108 .seq_show = memory_low_show,
5109 .write = memory_low_write,
5110 },
5111 {
5112 .name = "high",
5113 .flags = CFTYPE_NOT_ON_ROOT,
5114 .seq_show = memory_high_show,
5115 .write = memory_high_write,
5116 },
5117 {
5118 .name = "max",
5119 .flags = CFTYPE_NOT_ON_ROOT,
5120 .seq_show = memory_max_show,
5121 .write = memory_max_write,
5122 },
5123 {
5124 .name = "events",
5125 .flags = CFTYPE_NOT_ON_ROOT,
5126 .file_offset = offsetof(struct mem_cgroup, events_file),
5127 .seq_show = memory_events_show,
5128 },
5129 { } /* terminate */
5130 };
5131
5132 struct cgroup_subsys memory_cgrp_subsys = {
5133 .css_alloc = mem_cgroup_css_alloc,
5134 .css_online = mem_cgroup_css_online,
5135 .css_offline = mem_cgroup_css_offline,
5136 .css_released = mem_cgroup_css_released,
5137 .css_free = mem_cgroup_css_free,
5138 .css_reset = mem_cgroup_css_reset,
5139 .can_attach = mem_cgroup_can_attach,
5140 .cancel_attach = mem_cgroup_cancel_attach,
5141 .attach = mem_cgroup_move_task,
5142 .bind = mem_cgroup_bind,
5143 .dfl_cftypes = memory_files,
5144 .legacy_cftypes = mem_cgroup_legacy_files,
5145 .early_init = 0,
5146 };
5147
5148 /**
5149 * mem_cgroup_low - check if memory consumption is below the normal range
5150 * @root: the highest ancestor to consider
5151 * @memcg: the memory cgroup to check
5152 *
5153 * Returns %true if memory consumption of @memcg, and that of all
5154 * configurable ancestors up to @root, is below the normal range.
5155 */
5156 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5157 {
5158 if (mem_cgroup_disabled())
5159 return false;
5160
5161 /*
5162 * The toplevel group doesn't have a configurable range, so
5163 * it's never low when looked at directly, and it is not
5164 * considered an ancestor when assessing the hierarchy.
5165 */
5166
5167 if (memcg == root_mem_cgroup)
5168 return false;
5169
5170 if (page_counter_read(&memcg->memory) >= memcg->low)
5171 return false;
5172
5173 while (memcg != root) {
5174 memcg = parent_mem_cgroup(memcg);
5175
5176 if (memcg == root_mem_cgroup)
5177 break;
5178
5179 if (page_counter_read(&memcg->memory) >= memcg->low)
5180 return false;
5181 }
5182 return true;
5183 }
5184
5185 /**
5186 * mem_cgroup_try_charge - try charging a page
5187 * @page: page to charge
5188 * @mm: mm context of the victim
5189 * @gfp_mask: reclaim mode
5190 * @memcgp: charged memcg return
5191 *
5192 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5193 * pages according to @gfp_mask if necessary.
5194 *
5195 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5196 * Otherwise, an error code is returned.
5197 *
5198 * After page->mapping has been set up, the caller must finalize the
5199 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5200 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5201 */
5202 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5203 gfp_t gfp_mask, struct mem_cgroup **memcgp,
5204 bool compound)
5205 {
5206 struct mem_cgroup *memcg = NULL;
5207 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5208 int ret = 0;
5209
5210 if (mem_cgroup_disabled())
5211 goto out;
5212
5213 if (PageSwapCache(page)) {
5214 /*
5215 * Every swap fault against a single page tries to charge the
5216 * page, bail as early as possible. shmem_unuse() encounters
5217 * already charged pages, too. The USED bit is protected by
5218 * the page lock, which serializes swap cache removal, which
5219 * in turn serializes uncharging.
5220 */
5221 VM_BUG_ON_PAGE(!PageLocked(page), page);
5222 if (page->mem_cgroup)
5223 goto out;
5224
5225 if (do_swap_account) {
5226 swp_entry_t ent = { .val = page_private(page), };
5227 unsigned short id = lookup_swap_cgroup_id(ent);
5228
5229 rcu_read_lock();
5230 memcg = mem_cgroup_from_id(id);
5231 if (memcg && !css_tryget_online(&memcg->css))
5232 memcg = NULL;
5233 rcu_read_unlock();
5234 }
5235 }
5236
5237 if (!memcg)
5238 memcg = get_mem_cgroup_from_mm(mm);
5239
5240 ret = try_charge(memcg, gfp_mask, nr_pages);
5241
5242 css_put(&memcg->css);
5243 out:
5244 *memcgp = memcg;
5245 return ret;
5246 }
5247
5248 /**
5249 * mem_cgroup_commit_charge - commit a page charge
5250 * @page: page to charge
5251 * @memcg: memcg to charge the page to
5252 * @lrucare: page might be on LRU already
5253 *
5254 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5255 * after page->mapping has been set up. This must happen atomically
5256 * as part of the page instantiation, i.e. under the page table lock
5257 * for anonymous pages, under the page lock for page and swap cache.
5258 *
5259 * In addition, the page must not be on the LRU during the commit, to
5260 * prevent racing with task migration. If it might be, use @lrucare.
5261 *
5262 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5263 */
5264 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5265 bool lrucare, bool compound)
5266 {
5267 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5268
5269 VM_BUG_ON_PAGE(!page->mapping, page);
5270 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5271
5272 if (mem_cgroup_disabled())
5273 return;
5274 /*
5275 * Swap faults will attempt to charge the same page multiple
5276 * times. But reuse_swap_page() might have removed the page
5277 * from swapcache already, so we can't check PageSwapCache().
5278 */
5279 if (!memcg)
5280 return;
5281
5282 commit_charge(page, memcg, lrucare);
5283
5284 local_irq_disable();
5285 mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
5286 memcg_check_events(memcg, page);
5287 local_irq_enable();
5288
5289 if (do_memsw_account() && PageSwapCache(page)) {
5290 swp_entry_t entry = { .val = page_private(page) };
5291 /*
5292 * The swap entry might not get freed for a long time,
5293 * let's not wait for it. The page already received a
5294 * memory+swap charge, drop the swap entry duplicate.
5295 */
5296 mem_cgroup_uncharge_swap(entry);
5297 }
5298 }
5299
5300 /**
5301 * mem_cgroup_cancel_charge - cancel a page charge
5302 * @page: page to charge
5303 * @memcg: memcg to charge the page to
5304 *
5305 * Cancel a charge transaction started by mem_cgroup_try_charge().
5306 */
5307 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
5308 bool compound)
5309 {
5310 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
5311
5312 if (mem_cgroup_disabled())
5313 return;
5314 /*
5315 * Swap faults will attempt to charge the same page multiple
5316 * times. But reuse_swap_page() might have removed the page
5317 * from swapcache already, so we can't check PageSwapCache().
5318 */
5319 if (!memcg)
5320 return;
5321
5322 cancel_charge(memcg, nr_pages);
5323 }
5324
5325 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
5326 unsigned long nr_anon, unsigned long nr_file,
5327 unsigned long nr_huge, struct page *dummy_page)
5328 {
5329 unsigned long nr_pages = nr_anon + nr_file;
5330 unsigned long flags;
5331
5332 if (!mem_cgroup_is_root(memcg)) {
5333 page_counter_uncharge(&memcg->memory, nr_pages);
5334 if (do_memsw_account())
5335 page_counter_uncharge(&memcg->memsw, nr_pages);
5336 memcg_oom_recover(memcg);
5337 }
5338
5339 local_irq_save(flags);
5340 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5341 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5342 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5343 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
5344 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
5345 memcg_check_events(memcg, dummy_page);
5346 local_irq_restore(flags);
5347
5348 if (!mem_cgroup_is_root(memcg))
5349 css_put_many(&memcg->css, nr_pages);
5350 }
5351
5352 static void uncharge_list(struct list_head *page_list)
5353 {
5354 struct mem_cgroup *memcg = NULL;
5355 unsigned long nr_anon = 0;
5356 unsigned long nr_file = 0;
5357 unsigned long nr_huge = 0;
5358 unsigned long pgpgout = 0;
5359 struct list_head *next;
5360 struct page *page;
5361
5362 next = page_list->next;
5363 do {
5364 unsigned int nr_pages = 1;
5365
5366 page = list_entry(next, struct page, lru);
5367 next = page->lru.next;
5368
5369 VM_BUG_ON_PAGE(PageLRU(page), page);
5370 VM_BUG_ON_PAGE(page_count(page), page);
5371
5372 if (!page->mem_cgroup)
5373 continue;
5374
5375 /*
5376 * Nobody should be changing or seriously looking at
5377 * page->mem_cgroup at this point, we have fully
5378 * exclusive access to the page.
5379 */
5380
5381 if (memcg != page->mem_cgroup) {
5382 if (memcg) {
5383 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5384 nr_huge, page);
5385 pgpgout = nr_anon = nr_file = nr_huge = 0;
5386 }
5387 memcg = page->mem_cgroup;
5388 }
5389
5390 if (PageTransHuge(page)) {
5391 nr_pages <<= compound_order(page);
5392 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5393 nr_huge += nr_pages;
5394 }
5395
5396 if (PageAnon(page))
5397 nr_anon += nr_pages;
5398 else
5399 nr_file += nr_pages;
5400
5401 page->mem_cgroup = NULL;
5402
5403 pgpgout++;
5404 } while (next != page_list);
5405
5406 if (memcg)
5407 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5408 nr_huge, page);
5409 }
5410
5411 /**
5412 * mem_cgroup_uncharge - uncharge a page
5413 * @page: page to uncharge
5414 *
5415 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5416 * mem_cgroup_commit_charge().
5417 */
5418 void mem_cgroup_uncharge(struct page *page)
5419 {
5420 if (mem_cgroup_disabled())
5421 return;
5422
5423 /* Don't touch page->lru of any random page, pre-check: */
5424 if (!page->mem_cgroup)
5425 return;
5426
5427 INIT_LIST_HEAD(&page->lru);
5428 uncharge_list(&page->lru);
5429 }
5430
5431 /**
5432 * mem_cgroup_uncharge_list - uncharge a list of page
5433 * @page_list: list of pages to uncharge
5434 *
5435 * Uncharge a list of pages previously charged with
5436 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5437 */
5438 void mem_cgroup_uncharge_list(struct list_head *page_list)
5439 {
5440 if (mem_cgroup_disabled())
5441 return;
5442
5443 if (!list_empty(page_list))
5444 uncharge_list(page_list);
5445 }
5446
5447 /**
5448 * mem_cgroup_replace_page - migrate a charge to another page
5449 * @oldpage: currently charged page
5450 * @newpage: page to transfer the charge to
5451 *
5452 * Migrate the charge from @oldpage to @newpage.
5453 *
5454 * Both pages must be locked, @newpage->mapping must be set up.
5455 * Either or both pages might be on the LRU already.
5456 */
5457 void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
5458 {
5459 struct mem_cgroup *memcg;
5460 unsigned int nr_pages;
5461 bool compound;
5462
5463 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5464 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5465 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5466 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5467 newpage);
5468
5469 if (mem_cgroup_disabled())
5470 return;
5471
5472 /* Page cache replacement: new page already charged? */
5473 if (newpage->mem_cgroup)
5474 return;
5475
5476 /* Swapcache readahead pages can get replaced before being charged */
5477 memcg = oldpage->mem_cgroup;
5478 if (!memcg)
5479 return;
5480
5481 /* Force-charge the new page. The old one will be freed soon */
5482 compound = PageTransHuge(newpage);
5483 nr_pages = compound ? hpage_nr_pages(newpage) : 1;
5484
5485 page_counter_charge(&memcg->memory, nr_pages);
5486 if (do_memsw_account())
5487 page_counter_charge(&memcg->memsw, nr_pages);
5488 css_get_many(&memcg->css, nr_pages);
5489
5490 commit_charge(newpage, memcg, true);
5491
5492 local_irq_disable();
5493 mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
5494 memcg_check_events(memcg, newpage);
5495 local_irq_enable();
5496 }
5497
5498 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
5499 EXPORT_SYMBOL(memcg_sockets_enabled_key);
5500
5501 void sock_update_memcg(struct sock *sk)
5502 {
5503 struct mem_cgroup *memcg;
5504
5505 /* Socket cloning can throw us here with sk_cgrp already
5506 * filled. It won't however, necessarily happen from
5507 * process context. So the test for root memcg given
5508 * the current task's memcg won't help us in this case.
5509 *
5510 * Respecting the original socket's memcg is a better
5511 * decision in this case.
5512 */
5513 if (sk->sk_memcg) {
5514 BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
5515 css_get(&sk->sk_memcg->css);
5516 return;
5517 }
5518
5519 rcu_read_lock();
5520 memcg = mem_cgroup_from_task(current);
5521 if (memcg == root_mem_cgroup)
5522 goto out;
5523 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
5524 goto out;
5525 if (css_tryget_online(&memcg->css))
5526 sk->sk_memcg = memcg;
5527 out:
5528 rcu_read_unlock();
5529 }
5530 EXPORT_SYMBOL(sock_update_memcg);
5531
5532 void sock_release_memcg(struct sock *sk)
5533 {
5534 WARN_ON(!sk->sk_memcg);
5535 css_put(&sk->sk_memcg->css);
5536 }
5537
5538 /**
5539 * mem_cgroup_charge_skmem - charge socket memory
5540 * @memcg: memcg to charge
5541 * @nr_pages: number of pages to charge
5542 *
5543 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
5544 * @memcg's configured limit, %false if the charge had to be forced.
5545 */
5546 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5547 {
5548 gfp_t gfp_mask = GFP_KERNEL;
5549
5550 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5551 struct page_counter *fail;
5552
5553 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
5554 memcg->tcpmem_pressure = 0;
5555 return true;
5556 }
5557 page_counter_charge(&memcg->tcpmem, nr_pages);
5558 memcg->tcpmem_pressure = 1;
5559 return false;
5560 }
5561
5562 /* Don't block in the packet receive path */
5563 if (in_softirq())
5564 gfp_mask = GFP_NOWAIT;
5565
5566 if (try_charge(memcg, gfp_mask, nr_pages) == 0)
5567 return true;
5568
5569 try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
5570 return false;
5571 }
5572
5573 /**
5574 * mem_cgroup_uncharge_skmem - uncharge socket memory
5575 * @memcg - memcg to uncharge
5576 * @nr_pages - number of pages to uncharge
5577 */
5578 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
5579 {
5580 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
5581 page_counter_uncharge(&memcg->tcpmem, nr_pages);
5582 return;
5583 }
5584
5585 page_counter_uncharge(&memcg->memory, nr_pages);
5586 css_put_many(&memcg->css, nr_pages);
5587 }
5588
5589 static int __init cgroup_memory(char *s)
5590 {
5591 char *token;
5592
5593 while ((token = strsep(&s, ",")) != NULL) {
5594 if (!*token)
5595 continue;
5596 if (!strcmp(token, "nosocket"))
5597 cgroup_memory_nosocket = true;
5598 if (!strcmp(token, "nokmem"))
5599 cgroup_memory_nokmem = true;
5600 }
5601 return 0;
5602 }
5603 __setup("cgroup.memory=", cgroup_memory);
5604
5605 /*
5606 * subsys_initcall() for memory controller.
5607 *
5608 * Some parts like hotcpu_notifier() have to be initialized from this context
5609 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
5610 * everything that doesn't depend on a specific mem_cgroup structure should
5611 * be initialized from here.
5612 */
5613 static int __init mem_cgroup_init(void)
5614 {
5615 int cpu, node;
5616
5617 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
5618
5619 for_each_possible_cpu(cpu)
5620 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5621 drain_local_stock);
5622
5623 for_each_node(node) {
5624 struct mem_cgroup_tree_per_node *rtpn;
5625 int zone;
5626
5627 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5628 node_online(node) ? node : NUMA_NO_NODE);
5629
5630 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5631 struct mem_cgroup_tree_per_zone *rtpz;
5632
5633 rtpz = &rtpn->rb_tree_per_zone[zone];
5634 rtpz->rb_root = RB_ROOT;
5635 spin_lock_init(&rtpz->lock);
5636 }
5637 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5638 }
5639
5640 return 0;
5641 }
5642 subsys_initcall(mem_cgroup_init);
5643
5644 #ifdef CONFIG_MEMCG_SWAP
5645 /**
5646 * mem_cgroup_swapout - transfer a memsw charge to swap
5647 * @page: page whose memsw charge to transfer
5648 * @entry: swap entry to move the charge to
5649 *
5650 * Transfer the memsw charge of @page to @entry.
5651 */
5652 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5653 {
5654 struct mem_cgroup *memcg;
5655 unsigned short oldid;
5656
5657 VM_BUG_ON_PAGE(PageLRU(page), page);
5658 VM_BUG_ON_PAGE(page_count(page), page);
5659
5660 if (!do_memsw_account())
5661 return;
5662
5663 memcg = page->mem_cgroup;
5664
5665 /* Readahead page, never charged */
5666 if (!memcg)
5667 return;
5668
5669 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5670 VM_BUG_ON_PAGE(oldid, page);
5671 mem_cgroup_swap_statistics(memcg, true);
5672
5673 page->mem_cgroup = NULL;
5674
5675 if (!mem_cgroup_is_root(memcg))
5676 page_counter_uncharge(&memcg->memory, 1);
5677
5678 /*
5679 * Interrupts should be disabled here because the caller holds the
5680 * mapping->tree_lock lock which is taken with interrupts-off. It is
5681 * important here to have the interrupts disabled because it is the
5682 * only synchronisation we have for udpating the per-CPU variables.
5683 */
5684 VM_BUG_ON(!irqs_disabled());
5685 mem_cgroup_charge_statistics(memcg, page, false, -1);
5686 memcg_check_events(memcg, page);
5687 }
5688
5689 /*
5690 * mem_cgroup_try_charge_swap - try charging a swap entry
5691 * @page: page being added to swap
5692 * @entry: swap entry to charge
5693 *
5694 * Try to charge @entry to the memcg that @page belongs to.
5695 *
5696 * Returns 0 on success, -ENOMEM on failure.
5697 */
5698 int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
5699 {
5700 struct mem_cgroup *memcg;
5701 struct page_counter *counter;
5702 unsigned short oldid;
5703
5704 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
5705 return 0;
5706
5707 memcg = page->mem_cgroup;
5708
5709 /* Readahead page, never charged */
5710 if (!memcg)
5711 return 0;
5712
5713 if (!mem_cgroup_is_root(memcg) &&
5714 !page_counter_try_charge(&memcg->swap, 1, &counter))
5715 return -ENOMEM;
5716
5717 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5718 VM_BUG_ON_PAGE(oldid, page);
5719 mem_cgroup_swap_statistics(memcg, true);
5720
5721 css_get(&memcg->css);
5722 return 0;
5723 }
5724
5725 /**
5726 * mem_cgroup_uncharge_swap - uncharge a swap entry
5727 * @entry: swap entry to uncharge
5728 *
5729 * Drop the swap charge associated with @entry.
5730 */
5731 void mem_cgroup_uncharge_swap(swp_entry_t entry)
5732 {
5733 struct mem_cgroup *memcg;
5734 unsigned short id;
5735
5736 if (!do_swap_account)
5737 return;
5738
5739 id = swap_cgroup_record(entry, 0);
5740 rcu_read_lock();
5741 memcg = mem_cgroup_from_id(id);
5742 if (memcg) {
5743 if (!mem_cgroup_is_root(memcg)) {
5744 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5745 page_counter_uncharge(&memcg->swap, 1);
5746 else
5747 page_counter_uncharge(&memcg->memsw, 1);
5748 }
5749 mem_cgroup_swap_statistics(memcg, false);
5750 css_put(&memcg->css);
5751 }
5752 rcu_read_unlock();
5753 }
5754
5755 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
5756 {
5757 long nr_swap_pages = get_nr_swap_pages();
5758
5759 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5760 return nr_swap_pages;
5761 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5762 nr_swap_pages = min_t(long, nr_swap_pages,
5763 READ_ONCE(memcg->swap.limit) -
5764 page_counter_read(&memcg->swap));
5765 return nr_swap_pages;
5766 }
5767
5768 bool mem_cgroup_swap_full(struct page *page)
5769 {
5770 struct mem_cgroup *memcg;
5771
5772 VM_BUG_ON_PAGE(!PageLocked(page), page);
5773
5774 if (vm_swap_full())
5775 return true;
5776 if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
5777 return false;
5778
5779 memcg = page->mem_cgroup;
5780 if (!memcg)
5781 return false;
5782
5783 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
5784 if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
5785 return true;
5786
5787 return false;
5788 }
5789
5790 /* for remember boot option*/
5791 #ifdef CONFIG_MEMCG_SWAP_ENABLED
5792 static int really_do_swap_account __initdata = 1;
5793 #else
5794 static int really_do_swap_account __initdata;
5795 #endif
5796
5797 static int __init enable_swap_account(char *s)
5798 {
5799 if (!strcmp(s, "1"))
5800 really_do_swap_account = 1;
5801 else if (!strcmp(s, "0"))
5802 really_do_swap_account = 0;
5803 return 1;
5804 }
5805 __setup("swapaccount=", enable_swap_account);
5806
5807 static u64 swap_current_read(struct cgroup_subsys_state *css,
5808 struct cftype *cft)
5809 {
5810 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5811
5812 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5813 }
5814
5815 static int swap_max_show(struct seq_file *m, void *v)
5816 {
5817 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5818 unsigned long max = READ_ONCE(memcg->swap.limit);
5819
5820 if (max == PAGE_COUNTER_MAX)
5821 seq_puts(m, "max\n");
5822 else
5823 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5824
5825 return 0;
5826 }
5827
5828 static ssize_t swap_max_write(struct kernfs_open_file *of,
5829 char *buf, size_t nbytes, loff_t off)
5830 {
5831 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5832 unsigned long max;
5833 int err;
5834
5835 buf = strstrip(buf);
5836 err = page_counter_memparse(buf, "max", &max);
5837 if (err)
5838 return err;
5839
5840 mutex_lock(&memcg_limit_mutex);
5841 err = page_counter_limit(&memcg->swap, max);
5842 mutex_unlock(&memcg_limit_mutex);
5843 if (err)
5844 return err;
5845
5846 return nbytes;
5847 }
5848
5849 static struct cftype swap_files[] = {
5850 {
5851 .name = "swap.current",
5852 .flags = CFTYPE_NOT_ON_ROOT,
5853 .read_u64 = swap_current_read,
5854 },
5855 {
5856 .name = "swap.max",
5857 .flags = CFTYPE_NOT_ON_ROOT,
5858 .seq_show = swap_max_show,
5859 .write = swap_max_write,
5860 },
5861 { } /* terminate */
5862 };
5863
5864 static struct cftype memsw_cgroup_files[] = {
5865 {
5866 .name = "memsw.usage_in_bytes",
5867 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
5868 .read_u64 = mem_cgroup_read_u64,
5869 },
5870 {
5871 .name = "memsw.max_usage_in_bytes",
5872 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
5873 .write = mem_cgroup_reset,
5874 .read_u64 = mem_cgroup_read_u64,
5875 },
5876 {
5877 .name = "memsw.limit_in_bytes",
5878 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
5879 .write = mem_cgroup_write,
5880 .read_u64 = mem_cgroup_read_u64,
5881 },
5882 {
5883 .name = "memsw.failcnt",
5884 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
5885 .write = mem_cgroup_reset,
5886 .read_u64 = mem_cgroup_read_u64,
5887 },
5888 { }, /* terminate */
5889 };
5890
5891 static int __init mem_cgroup_swap_init(void)
5892 {
5893 if (!mem_cgroup_disabled() && really_do_swap_account) {
5894 do_swap_account = 1;
5895 WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
5896 swap_files));
5897 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
5898 memsw_cgroup_files));
5899 }
5900 return 0;
5901 }
5902 subsys_initcall(mem_cgroup_swap_init);
5903
5904 #endif /* CONFIG_MEMCG_SWAP */
This page took 0.149966 seconds and 5 git commands to generate.