mm: memcontrol: remove synchronous stock draining code
author     Johannes Weiner <hannes@cmpxchg.org>
           Wed, 10 Dec 2014 23:42:50 +0000 (15:42 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 11 Dec 2014 01:41:05 +0000 (17:41 -0800)
With charge reparenting gone, the last caller of the synchronous stock draining code went away as well.  Fold the remaining trylock-guarded asynchronous variant into drain_all_stock() itself and remove drain_all_stock_async() and drain_all_stock_sync().

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
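
For orientation, the per-CPU "stock" machinery that the diff below manipulates looks roughly like this in mm/memcontrol.c of this era (a sketch for context, not part of the patch; comments added here for explanation):

struct memcg_stock_pcp {
	struct mem_cgroup *cached;	/* memcg the cached charges belong to */
	unsigned int nr_pages;		/* pre-charged pages sitting in this cache */
	struct work_struct work;	/* per-CPU drain work item */
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0	/* bit set while a drain is queued or running */
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);	/* serializes drain_all_stock() callers */

Each CPU caches a small batch of pre-charged pages for one memcg; draining hands those pages back to the page counters, either locally or via a work item scheduled on the owning CPU.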
mm/memcontrol.c

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f90e43c1499f0c79b03e89af5979caee63b30011..3a628435f36a826a82e34cb037a60a483c1ec19e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -634,8 +634,6 @@ static void disarm_static_keys(struct mem_cgroup *memcg)
        disarm_kmem_keys(memcg);
 }
 
-static void drain_all_stock_async(struct mem_cgroup *memcg);
-
 static struct mem_cgroup_per_zone *
 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
 {
@@ -2302,13 +2300,15 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 
 /*
  * Drains all per-CPU charge caches for given root_memcg resp. subtree
- * of the hierarchy under it. sync flag says whether we should block
- * until the work is done.
+ * of the hierarchy under it.
  */
-static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
+static void drain_all_stock(struct mem_cgroup *root_memcg)
 {
        int cpu, curcpu;
 
+       /* If someone's already draining, avoid running more workers. */
+       if (!mutex_trylock(&percpu_charge_mutex))
+               return;
        /* Notify other cpus that system-wide "drain" is running */
        get_online_cpus();
        curcpu = get_cpu();
@@ -2329,41 +2329,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync)
                }
        }
        put_cpu();
-
-       if (!sync)
-               goto out;
-
-       for_each_online_cpu(cpu) {
-               struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
-               if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
-                       flush_work(&stock->work);
-       }
-out:
        put_online_cpus();
-}
-
-/*
- * Tries to drain stocked charges in other cpus. This function is asynchronous
- * and just put a work per cpu for draining localy on each cpu. Caller can
- * expects some charges will be back later but cannot wait for it.
- */
-static void drain_all_stock_async(struct mem_cgroup *root_memcg)
-{
-       /*
-        * If someone calls draining, avoid adding more kworker runs.
-        */
-       if (!mutex_trylock(&percpu_charge_mutex))
-               return;
-       drain_all_stock(root_memcg, false);
-       mutex_unlock(&percpu_charge_mutex);
-}
-
-/* This is a synchronous drain interface. */
-static void drain_all_stock_sync(struct mem_cgroup *root_memcg)
-{
-       /* called when force_empty is called */
-       mutex_lock(&percpu_charge_mutex);
-       drain_all_stock(root_memcg, true);
        mutex_unlock(&percpu_charge_mutex);
 }
 
@@ -2472,7 +2438,7 @@ retry:
                goto retry;
 
        if (!drained) {
-               drain_all_stock_async(mem_over_limit);
+               drain_all_stock(mem_over_limit);
                drained = true;
                goto retry;
        }
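
For reference, drain_all_stock() reads roughly as follows with both hunks applied. The middle of the per-CPU loop is elided from the hunks above, so its body here (the empty-stock check, the descendancy test, and the drain_local_stock()/schedule_work_on() dispatch) is reconstructed from kernels of this era; treat it as a sketch rather than the literal tree contents, and the exact spelling of the descendancy helper in particular is an assumption:

/*
 * Drains all per-CPU charge caches for given root_memcg resp. subtree
 * of the hierarchy under it.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid running more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg = stock->cached;

		/* Skip empty stocks and stocks outside root_memcg's subtree. */
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg))
			continue;
		/* Kick off the drain work, but never wait for it to finish. */
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}

The net effect: the one remaining caller, the reclaim retry path shown in the last hunk, only ever gets an opportunistic, non-blocking drain, and the trylock guard that used to live in drain_all_stock_async() now sits directly in drain_all_stock().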