mm: memcontrol: do not uncharge old page in page cache replacement
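
Changing page->mem_cgroup of a live page is risky: concurrent statistics
updates and LRU operations dereference that pointer. When a page cache page
is replaced, do not uncharge the old page by clearing its mem_cgroup link
under the LRU lock; the old page is about to be freed and will be uncharged
through the normal freeing path. Instead, force-charge the new page to the
same memcg: bump the memory (and, with combined memory+swap accounting,
memsw) counters, take css references, commit the charge, and update the
per-memcg statistics.

The range below also folds in several related memcontrol changes:
memcg_create_mutex and the memcg->initialized handshake in mem_cgroup_iter()
are removed; the -EBUSY populated/children check moves from
memcg_online_kmem() to memcg_update_kmem_limit(); the tcp_mem sub-structure
is flattened into memcg->tcpmem, tcpmem_active and tcpmem_pressure;
allocation and hierarchy setup move from css_online() into
mem_cgroup_css_alloc(); and cgroup2 gains a dedicated swap page counter with
swap.current/swap.max interface files, mem_cgroup_try_charge_swap(),
mem_cgroup_get_nr_swap_pages() and mem_cgroup_swap_full().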
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 379f9911b87b7c903b99570275dda46e1b25cdde..bf35bff282fc09412b34929f9829bfe86b88b361 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -250,13 +250,6 @@ enum res_type {
 /* Used for OOM notifier */
 #define OOM_CONTROL            (0)
 
-/*
- * The memcg_create_mutex will be held whenever a new cgroup is created.
- * As a consequence, any change that needs to protect against new child cgroups
- * appearing has to hold it as well.
- */
-static DEFINE_MUTEX(memcg_create_mutex);
-
 /* Some nice accessors for the vmpressure. */
 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
 {
@@ -373,13 +366,6 @@ mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
  *
  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
  * is returned.
- *
- * XXX: The above description of behavior on the default hierarchy isn't
- * strictly true yet as replace_page_cache_page() can modify the
- * association before @page is released even on the default hierarchy;
- * however, the current and planned usages don't mix the the two functions
- * and replace_page_cache_page() will soon be updated to make the invariant
- * actually true.
  */
 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
 {
@@ -899,17 +885,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                if (css == &root->css)
                        break;
 
-               if (css_tryget(css)) {
-                       /*
-                        * Make sure the memcg is initialized:
-                        * mem_cgroup_css_online() orders the the
-                        * initialization against setting the flag.
-                        */
-                       if (smp_load_acquire(&memcg->initialized))
-                               break;
-
-                       css_put(css);
-               }
+               if (css_tryget(css))
+                       break;
 
                memcg = NULL;
        }
@@ -1236,7 +1213,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
                pr_cont(":");
 
                for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
-                       if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
+                       if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
                                continue;
                        pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
                                K(mem_cgroup_read_stat(iter, i)));
@@ -1275,9 +1252,12 @@ static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
        limit = memcg->memory.limit;
        if (mem_cgroup_swappiness(memcg)) {
                unsigned long memsw_limit;
+               unsigned long swap_limit;
 
                memsw_limit = memcg->memsw.limit;
-               limit = min(limit + total_swap_pages, memsw_limit);
+               swap_limit = memcg->swap.limit;
+               swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
+               limit = min(limit + swap_limit, memsw_limit);
        }
        return limit;
 }
@@ -2690,14 +2670,6 @@ static inline bool memcg_has_children(struct mem_cgroup *memcg)
 {
        bool ret;
 
-       /*
-        * The lock does not prevent addition or deletion of children, but
-        * it prevents a new child from being initialized based on this
-        * parent in css_online(), so it's enough to decide whether
-        * hierarchically inherited attributes can still be changed or not.
-        */
-       lockdep_assert_held(&memcg_create_mutex);
-
        rcu_read_lock();
        ret = css_next_child(NULL, &memcg->css);
        rcu_read_unlock();
@@ -2760,10 +2732,8 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
        struct mem_cgroup *memcg = mem_cgroup_from_css(css);
        struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
 
-       mutex_lock(&memcg_create_mutex);
-
        if (memcg->use_hierarchy == val)
-               goto out;
+               return 0;
 
        /*
         * If parent's use_hierarchy is set, we can't make any modifications
@@ -2782,9 +2752,6 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
        } else
                retval = -EINVAL;
 
-out:
-       mutex_unlock(&memcg_create_mutex);
-
        return retval;
 }
 
@@ -2843,7 +2810,7 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
                counter = &memcg->kmem;
                break;
        case _TCP:
-               counter = &memcg->tcp_mem.memory_allocated;
+               counter = &memcg->tcpmem;
                break;
        default:
                BUG();
@@ -2872,37 +2839,14 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
 #ifndef CONFIG_SLOB
 static int memcg_online_kmem(struct mem_cgroup *memcg)
 {
-       int err = 0;
        int memcg_id;
 
        BUG_ON(memcg->kmemcg_id >= 0);
        BUG_ON(memcg->kmem_state);
 
-       /*
-        * For simplicity, we won't allow this to be disabled.  It also can't
-        * be changed if the cgroup has children already, or if tasks had
-        * already joined.
-        *
-        * If tasks join before we set the limit, a person looking at
-        * kmem.usage_in_bytes will have no way to determine when it took
-        * place, which makes the value quite meaningless.
-        *
-        * After it first became limited, changes in the value of the limit are
-        * of course permitted.
-        */
-       mutex_lock(&memcg_create_mutex);
-       if (cgroup_is_populated(memcg->css.cgroup) ||
-           (memcg->use_hierarchy && memcg_has_children(memcg)))
-               err = -EBUSY;
-       mutex_unlock(&memcg_create_mutex);
-       if (err)
-               goto out;
-
        memcg_id = memcg_alloc_cache_id();
-       if (memcg_id < 0) {
-               err = memcg_id;
-               goto out;
-       }
+       if (memcg_id < 0)
+               return memcg_id;
 
        static_branch_inc(&memcg_kmem_enabled_key);
        /*
@@ -2913,17 +2857,14 @@ static int memcg_online_kmem(struct mem_cgroup *memcg)
         */
        memcg->kmemcg_id = memcg_id;
        memcg->kmem_state = KMEM_ONLINE;
-out:
-       return err;
+
+       return 0;
 }
 
-static int memcg_propagate_kmem(struct mem_cgroup *memcg)
+static int memcg_propagate_kmem(struct mem_cgroup *parent,
+                               struct mem_cgroup *memcg)
 {
        int ret = 0;
-       struct mem_cgroup *parent = parent_mem_cgroup(memcg);
-
-       if (!parent)
-               return 0;
 
        mutex_lock(&memcg_limit_mutex);
        /*
@@ -2985,6 +2926,10 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg)
 
 static void memcg_free_kmem(struct mem_cgroup *memcg)
 {
+       /* css_alloc() failed, offlining didn't happen */
+       if (unlikely(memcg->kmem_state == KMEM_ONLINE))
+               memcg_offline_kmem(memcg);
+
        if (memcg->kmem_state == KMEM_ALLOCATED) {
                memcg_destroy_kmem_caches(memcg);
                static_branch_dec(&memcg_kmem_enabled_key);
@@ -2992,7 +2937,11 @@ static void memcg_free_kmem(struct mem_cgroup *memcg)
        }
 }
 #else
-static int memcg_propagate_kmem(struct mem_cgroup *memcg)
+static int memcg_propagate_kmem(struct mem_cgroup *parent, struct mem_cgroup *memcg)
+{
+       return 0;
+}
+static int memcg_online_kmem(struct mem_cgroup *memcg)
 {
        return 0;
 }
@@ -3007,11 +2956,16 @@ static void memcg_free_kmem(struct mem_cgroup *memcg)
 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
                                   unsigned long limit)
 {
-       int ret;
+       int ret = 0;
 
        mutex_lock(&memcg_limit_mutex);
        /* Top-level cgroup doesn't propagate from root */
        if (!memcg_kmem_online(memcg)) {
+               if (cgroup_is_populated(memcg->css.cgroup) ||
+                   (memcg->use_hierarchy && memcg_has_children(memcg)))
+                       ret = -EBUSY;
+               if (ret)
+                       goto out;
                ret = memcg_online_kmem(memcg);
                if (ret)
                        goto out;
@@ -3028,11 +2982,11 @@ static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
 
        mutex_lock(&memcg_limit_mutex);
 
-       ret = page_counter_limit(&memcg->tcp_mem.memory_allocated, limit);
+       ret = page_counter_limit(&memcg->tcpmem, limit);
        if (ret)
                goto out;
 
-       if (!memcg->tcp_mem.active) {
+       if (!memcg->tcpmem_active) {
                /*
                 * The active flag needs to be written after the static_key
                 * update. This is what guarantees that the socket activation
@@ -3050,7 +3004,7 @@ static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
                 * patched in yet.
                 */
                static_branch_inc(&memcg_sockets_enabled_key);
-               memcg->tcp_mem.active = true;
+               memcg->tcpmem_active = true;
        }
 out:
        mutex_unlock(&memcg_limit_mutex);
@@ -3119,7 +3073,7 @@ static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
                counter = &memcg->kmem;
                break;
        case _TCP:
-               counter = &memcg->tcp_mem.memory_allocated;
+               counter = &memcg->tcpmem;
                break;
        default:
                BUG();
@@ -4167,90 +4121,44 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
        kfree(memcg->nodeinfo[node]);
 }
 
-static struct mem_cgroup *mem_cgroup_alloc(void)
-{
-       struct mem_cgroup *memcg;
-       size_t size;
-
-       size = sizeof(struct mem_cgroup);
-       size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
-
-       memcg = kzalloc(size, GFP_KERNEL);
-       if (!memcg)
-               return NULL;
-
-       memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
-       if (!memcg->stat)
-               goto out_free;
-
-       if (memcg_wb_domain_init(memcg, GFP_KERNEL))
-               goto out_free_stat;
-
-       return memcg;
-
-out_free_stat:
-       free_percpu(memcg->stat);
-out_free:
-       kfree(memcg);
-       return NULL;
-}
-
-/*
- * At destroying mem_cgroup, references from swap_cgroup can remain.
- * (scanning all at force_empty is too costly...)
- *
- * Instead of clearing all references at force_empty, we remember
- * the number of reference from swap_cgroup and free mem_cgroup when
- * it goes down to 0.
- *
- * Removal of cgroup itself succeeds regardless of refs from swap.
- */
-
-static void __mem_cgroup_free(struct mem_cgroup *memcg)
+static void mem_cgroup_free(struct mem_cgroup *memcg)
 {
        int node;
 
-       cancel_work_sync(&memcg->high_work);
-
-       mem_cgroup_remove_from_trees(memcg);
-
+       memcg_wb_domain_exit(memcg);
        for_each_node(node)
                free_mem_cgroup_per_zone_info(memcg, node);
-
        free_percpu(memcg->stat);
-       memcg_wb_domain_exit(memcg);
        kfree(memcg);
 }
 
-static struct cgroup_subsys_state * __ref
-mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+static struct mem_cgroup *mem_cgroup_alloc(void)
 {
        struct mem_cgroup *memcg;
-       long error = -ENOMEM;
+       size_t size;
        int node;
 
-       memcg = mem_cgroup_alloc();
+       size = sizeof(struct mem_cgroup);
+       size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
+
+       memcg = kzalloc(size, GFP_KERNEL);
        if (!memcg)
-               return ERR_PTR(error);
+               return NULL;
+
+       memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
+       if (!memcg->stat)
+               goto fail;
 
        for_each_node(node)
                if (alloc_mem_cgroup_per_zone_info(memcg, node))
-                       goto free_out;
+                       goto fail;
 
-       /* root ? */
-       if (parent_css == NULL) {
-               root_mem_cgroup = memcg;
-               page_counter_init(&memcg->memory, NULL);
-               memcg->high = PAGE_COUNTER_MAX;
-               memcg->soft_limit = PAGE_COUNTER_MAX;
-               page_counter_init(&memcg->memsw, NULL);
-               page_counter_init(&memcg->kmem, NULL);
-       }
+       if (memcg_wb_domain_init(memcg, GFP_KERNEL))
+               goto fail;
 
        INIT_WORK(&memcg->high_work, high_work_func);
        memcg->last_scanned_node = MAX_NUMNODES;
        INIT_LIST_HEAD(&memcg->oom_notify);
-       memcg->move_charge_at_immigrate = 0;
        mutex_init(&memcg->thresholds_lock);
        spin_lock_init(&memcg->move_lock);
        vmpressure_init(&memcg->vmpressure);
@@ -4263,52 +4171,42 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&memcg->cgwb_list);
 #endif
-       return &memcg->css;
-
-free_out:
-       __mem_cgroup_free(memcg);
-       return ERR_PTR(error);
+       return memcg;
+fail:
+       mem_cgroup_free(memcg);
+       return NULL;
 }
 
-static int
-mem_cgroup_css_online(struct cgroup_subsys_state *css)
+static struct cgroup_subsys_state * __ref
+mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
-       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
-       struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
-       int ret;
-
-       if (css->id > MEM_CGROUP_ID_MAX)
-               return -ENOSPC;
-
-       if (!parent)
-               return 0;
-
-       mutex_lock(&memcg_create_mutex);
+       struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
+       struct mem_cgroup *memcg;
+       long error = -ENOMEM;
 
-       memcg->use_hierarchy = parent->use_hierarchy;
-       memcg->oom_kill_disable = parent->oom_kill_disable;
-       memcg->swappiness = mem_cgroup_swappiness(parent);
+       memcg = mem_cgroup_alloc();
+       if (!memcg)
+               return ERR_PTR(error);
 
-       if (parent->use_hierarchy) {
+       memcg->high = PAGE_COUNTER_MAX;
+       memcg->soft_limit = PAGE_COUNTER_MAX;
+       if (parent) {
+               memcg->swappiness = mem_cgroup_swappiness(parent);
+               memcg->oom_kill_disable = parent->oom_kill_disable;
+       }
+       if (parent && parent->use_hierarchy) {
+               memcg->use_hierarchy = true;
                page_counter_init(&memcg->memory, &parent->memory);
-               memcg->high = PAGE_COUNTER_MAX;
-               memcg->soft_limit = PAGE_COUNTER_MAX;
+               page_counter_init(&memcg->swap, &parent->swap);
                page_counter_init(&memcg->memsw, &parent->memsw);
                page_counter_init(&memcg->kmem, &parent->kmem);
-               page_counter_init(&memcg->tcp_mem.memory_allocated,
-                                 &parent->tcp_mem.memory_allocated);
-
-               /*
-                * No need to take a reference to the parent because cgroup
-                * core guarantees its existence.
-                */
+               page_counter_init(&memcg->tcpmem, &parent->tcpmem);
        } else {
                page_counter_init(&memcg->memory, NULL);
-               memcg->high = PAGE_COUNTER_MAX;
-               memcg->soft_limit = PAGE_COUNTER_MAX;
+               page_counter_init(&memcg->swap, NULL);
                page_counter_init(&memcg->memsw, NULL);
                page_counter_init(&memcg->kmem, NULL);
-               page_counter_init(&memcg->tcp_mem.memory_allocated, NULL);
+               page_counter_init(&memcg->tcpmem, NULL);
                /*
                 * Deeper hierarchy with use_hierarchy == false doesn't make
                 * much sense so let cgroup subsystem know about this
@@ -4317,21 +4215,31 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
                if (parent != root_mem_cgroup)
                        memory_cgrp_subsys.broken_hierarchy = true;
        }
-       mutex_unlock(&memcg_create_mutex);
 
-       ret = memcg_propagate_kmem(memcg);
-       if (ret)
-               return ret;
+       /* The following stuff does not apply to the root */
+       if (!parent) {
+               root_mem_cgroup = memcg;
+               return &memcg->css;
+       }
+
+       error = memcg_propagate_kmem(parent, memcg);
+       if (error)
+               goto fail;
 
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
                static_branch_inc(&memcg_sockets_enabled_key);
 
-       /*
-        * Make sure the memcg is initialized: mem_cgroup_iter()
-        * orders reading memcg->initialized against its callers
-        * reading the memcg members.
-        */
-       smp_store_release(&memcg->initialized, 1);
+       return &memcg->css;
+fail:
+       mem_cgroup_free(memcg);
+       return NULL;
+}
+
+static int
+mem_cgroup_css_online(struct cgroup_subsys_state *css)
+{
+       if (css->id > MEM_CGROUP_ID_MAX)
+               return -ENOSPC;
 
        return 0;
 }
@@ -4353,10 +4261,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
        }
        spin_unlock(&memcg->event_list_lock);
 
-       vmpressure_cleanup(&memcg->vmpressure);
-
        memcg_offline_kmem(memcg);
-
        wb_memcg_offline(memcg);
 }
 
@@ -4374,11 +4279,14 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
        if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
                static_branch_dec(&memcg_sockets_enabled_key);
 
-       if (memcg->tcp_mem.active)
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
                static_branch_dec(&memcg_sockets_enabled_key);
 
+       vmpressure_cleanup(&memcg->vmpressure);
+       cancel_work_sync(&memcg->high_work);
+       mem_cgroup_remove_from_trees(memcg);
        memcg_free_kmem(memcg);
-       __mem_cgroup_free(memcg);
+       mem_cgroup_free(memcg);
 }
 
 /**
@@ -5314,7 +5222,7 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
                if (page->mem_cgroup)
                        goto out;
 
-               if (do_memsw_account()) {
+               if (do_swap_account) {
                        swp_entry_t ent = { .val = page_private(page), };
                        unsigned short id = lookup_swap_cgroup_id(ent);
 
@@ -5549,7 +5457,8 @@ void mem_cgroup_uncharge_list(struct list_head *page_list)
 void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
 {
        struct mem_cgroup *memcg;
-       int isolated;
+       unsigned int nr_pages;
+       bool compound;
 
        VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
@@ -5569,11 +5478,21 @@ void mem_cgroup_replace_page(struct page *oldpage, struct page *newpage)
        if (!memcg)
                return;
 
-       lock_page_lru(oldpage, &isolated);
-       oldpage->mem_cgroup = NULL;
-       unlock_page_lru(oldpage, isolated);
+       /* Force-charge the new page. The old one will be freed soon */
+       compound = PageTransHuge(newpage);
+       nr_pages = compound ? hpage_nr_pages(newpage) : 1;
+
+       page_counter_charge(&memcg->memory, nr_pages);
+       if (do_memsw_account())
+               page_counter_charge(&memcg->memsw, nr_pages);
+       css_get_many(&memcg->css, nr_pages);
 
        commit_charge(newpage, memcg, true);
+
+       local_irq_disable();
+       mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
+       memcg_check_events(memcg, newpage);
+       local_irq_enable();
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -5601,7 +5520,7 @@ void sock_update_memcg(struct sock *sk)
        memcg = mem_cgroup_from_task(current);
        if (memcg == root_mem_cgroup)
                goto out;
-       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcp_mem.active)
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
                goto out;
        if (css_tryget_online(&memcg->css))
                sk->sk_memcg = memcg;
@@ -5629,15 +5548,14 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
        gfp_t gfp_mask = GFP_KERNEL;
 
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
-               struct page_counter *counter;
+               struct page_counter *fail;
 
-               if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
-                                           nr_pages, &counter)) {
-                       memcg->tcp_mem.memory_pressure = 0;
+               if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
+                       memcg->tcpmem_pressure = 0;
                        return true;
                }
-               page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
-               memcg->tcp_mem.memory_pressure = 1;
+               page_counter_charge(&memcg->tcpmem, nr_pages);
+               memcg->tcpmem_pressure = 1;
                return false;
        }
 
@@ -5660,8 +5578,7 @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 {
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
-               page_counter_uncharge(&memcg->tcp_mem.memory_allocated,
-                                     nr_pages);
+               page_counter_uncharge(&memcg->tcpmem, nr_pages);
                return;
        }
 
@@ -5769,32 +5686,107 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        memcg_check_events(memcg, page);
 }
 
+/**
+ * mem_cgroup_try_charge_swap - try charging a swap entry
+ * @page: page being added to swap
+ * @entry: swap entry to charge
+ *
+ * Try to charge @entry to the memcg that @page belongs to.
+ *
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
+{
+       struct mem_cgroup *memcg;
+       struct page_counter *counter;
+       unsigned short oldid;
+
+       if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
+               return 0;
+
+       memcg = page->mem_cgroup;
+
+       /* Readahead page, never charged */
+       if (!memcg)
+               return 0;
+
+       if (!mem_cgroup_is_root(memcg) &&
+           !page_counter_try_charge(&memcg->swap, 1, &counter))
+               return -ENOMEM;
+
+       oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
+       VM_BUG_ON_PAGE(oldid, page);
+       mem_cgroup_swap_statistics(memcg, true);
+
+       css_get(&memcg->css);
+       return 0;
+}
+
 /**
  * mem_cgroup_uncharge_swap - uncharge a swap entry
  * @entry: swap entry to uncharge
  *
- * Drop the memsw charge associated with @entry.
+ * Drop the swap charge associated with @entry.
  */
 void mem_cgroup_uncharge_swap(swp_entry_t entry)
 {
        struct mem_cgroup *memcg;
        unsigned short id;
 
-       if (!do_memsw_account())
+       if (!do_swap_account)
                return;
 
        id = swap_cgroup_record(entry, 0);
        rcu_read_lock();
        memcg = mem_cgroup_from_id(id);
        if (memcg) {
-               if (!mem_cgroup_is_root(memcg))
-                       page_counter_uncharge(&memcg->memsw, 1);
+               if (!mem_cgroup_is_root(memcg)) {
+                       if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+                               page_counter_uncharge(&memcg->swap, 1);
+                       else
+                               page_counter_uncharge(&memcg->memsw, 1);
+               }
                mem_cgroup_swap_statistics(memcg, false);
                css_put(&memcg->css);
        }
        rcu_read_unlock();
 }
 
+long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
+{
+       long nr_swap_pages = get_nr_swap_pages();
+
+       if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               return nr_swap_pages;
+       for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
+               nr_swap_pages = min_t(long, nr_swap_pages,
+                                     READ_ONCE(memcg->swap.limit) -
+                                     page_counter_read(&memcg->swap));
+       return nr_swap_pages;
+}
+
+bool mem_cgroup_swap_full(struct page *page)
+{
+       struct mem_cgroup *memcg;
+
+       VM_BUG_ON_PAGE(!PageLocked(page), page);
+
+       if (vm_swap_full())
+               return true;
+       if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               return false;
+
+       memcg = page->mem_cgroup;
+       if (!memcg)
+               return false;
+
+       for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
+               if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
+                       return true;
+
+       return false;
+}
+
 /* for remembering the boot option */
 #ifdef CONFIG_MEMCG_SWAP_ENABLED
 static int really_do_swap_account __initdata = 1;
@@ -5812,6 +5804,63 @@ static int __init enable_swap_account(char *s)
 }
 __setup("swapaccount=", enable_swap_account);
 
+static u64 swap_current_read(struct cgroup_subsys_state *css,
+                            struct cftype *cft)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+
+       return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
+}
+
+static int swap_max_show(struct seq_file *m, void *v)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
+       unsigned long max = READ_ONCE(memcg->swap.limit);
+
+       if (max == PAGE_COUNTER_MAX)
+               seq_puts(m, "max\n");
+       else
+               seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
+
+       return 0;
+}
+
+static ssize_t swap_max_write(struct kernfs_open_file *of,
+                             char *buf, size_t nbytes, loff_t off)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
+       unsigned long max;
+       int err;
+
+       buf = strstrip(buf);
+       err = page_counter_memparse(buf, "max", &max);
+       if (err)
+               return err;
+
+       mutex_lock(&memcg_limit_mutex);
+       err = page_counter_limit(&memcg->swap, max);
+       mutex_unlock(&memcg_limit_mutex);
+       if (err)
+               return err;
+
+       return nbytes;
+}
+
+static struct cftype swap_files[] = {
+       {
+               .name = "swap.current",
+               .flags = CFTYPE_NOT_ON_ROOT,
+               .read_u64 = swap_current_read,
+       },
+       {
+               .name = "swap.max",
+               .flags = CFTYPE_NOT_ON_ROOT,
+               .seq_show = swap_max_show,
+               .write = swap_max_write,
+       },
+       { }     /* terminate */
+};
+
 static struct cftype memsw_cgroup_files[] = {
        {
                .name = "memsw.usage_in_bytes",
@@ -5843,6 +5892,8 @@ static int __init mem_cgroup_swap_init(void)
 {
        if (!mem_cgroup_disabled() && really_do_swap_account) {
                do_swap_account = 1;
+               WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
+                                              swap_files));
                WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
                                                  memsw_cgroup_files));
        }
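
The swap.current and swap.max entries registered above surface as
memory.swap.current and memory.swap.max on a cgroup2 hierarchy. Below is a
minimal userspace sketch of setting a swap limit through the new file,
assuming a v2 hierarchy mounted at /sys/fs/cgroup and an existing child
group named "job" (both assumptions, not part of this patch):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical group path; memory.swap.max is the file added above. */
	const char *path = "/sys/fs/cgroup/job/memory.swap.max";
	char buf[32];
	int fd;

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Limit the group to 512MiB of swap; writing "max" lifts the limit. */
	snprintf(buf, sizeof(buf), "%llu", 512ULL << 20);
	if (write(fd, buf, strlen(buf)) < 0)
		perror("write");
	close(fd);
	return 0;
}

Reading memory.swap.current back reports usage in bytes, matching
swap_current_read(); writes to memory.swap.max go through
page_counter_memparse(), so suffixed values such as "512M" should also be
accepted.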