mm: memcontrol: inline memcg->move_lock locking
author    Johannes Weiner <hannes@cmpxchg.org>
          Wed, 10 Dec 2014 23:44:05 +0000 (15:44 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 11 Dec 2014 01:41:07 +0000 (17:41 -0800)
The wrappers around taking and dropping the memcg->move_lock spinlock add
nothing of value.  Inline the spinlock calls into the callsites.
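
For illustration, a minimal sketch of what an inlined callsite looks like after
this change; the struct and function names below are simplified stand-ins rather
than the real kernel definitions, and only the locking pattern itself mirrors
the diff:

	#include <linux/spinlock.h>

	/* Simplified stand-in for the part of struct mem_cgroup used here. */
	struct memcg_stub {
		spinlock_t move_lock;	/* initialized elsewhere with spin_lock_init() */
	};

	/*
	 * A callsite now takes the lock directly instead of going through a
	 * one-line wrapper; 'flags' holds the saved IRQ state across the
	 * critical section.
	 */
	static void move_account_sketch(struct memcg_stub *from)
	{
		unsigned long flags;

		spin_lock_irqsave(&from->move_lock, flags);
		/* ... update per-memcg page state under the lock ... */
		spin_unlock_irqrestore(&from->move_lock, flags);
	}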

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memcontrol.c

index 09fece0eb9f156404fd06832fb3e924dcb13d20a..a5c9aa4688e8d09264b0550aa13f11b361648f4b 100644
@@ -1522,23 +1522,6 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
        return false;
 }
 
-/*
- * Take this lock when
- * - a code tries to modify page's memcg while it's USED.
- * - a code tries to modify page state accounting in a memcg.
- */
-static void move_lock_mem_cgroup(struct mem_cgroup *memcg,
-                                 unsigned long *flags)
-{
-       spin_lock_irqsave(&memcg->move_lock, *flags);
-}
-
-static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
-                               unsigned long *flags)
-{
-       spin_unlock_irqrestore(&memcg->move_lock, *flags);
-}
-
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /**
  * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
@@ -2156,9 +2139,9 @@ again:
        if (atomic_read(&memcg->moving_account) <= 0)
                return memcg;
 
-       move_lock_mem_cgroup(memcg, flags);
+       spin_lock_irqsave(&memcg->move_lock, *flags);
        if (memcg != pc->mem_cgroup) {
-               move_unlock_mem_cgroup(memcg, flags);
+               spin_unlock_irqrestore(&memcg->move_lock, *flags);
                goto again;
        }
        *locked = true;
@@ -2176,7 +2159,7 @@ void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
                              unsigned long flags)
 {
        if (memcg && locked)
-               move_unlock_mem_cgroup(memcg, &flags);
+               spin_unlock_irqrestore(&memcg->move_lock, flags);
 
        rcu_read_unlock();
 }
@@ -3219,7 +3202,7 @@ static int mem_cgroup_move_account(struct page *page,
        if (pc->mem_cgroup != from)
                goto out_unlock;
 
-       move_lock_mem_cgroup(from, &flags);
+       spin_lock_irqsave(&from->move_lock, flags);
 
        if (!PageAnon(page) && page_mapped(page)) {
                __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
@@ -3243,7 +3226,8 @@ static int mem_cgroup_move_account(struct page *page,
 
        /* caller should have done css_get */
        pc->mem_cgroup = to;
-       move_unlock_mem_cgroup(from, &flags);
+       spin_unlock_irqrestore(&from->move_lock, flags);
+
        ret = 0;
 
        local_irq_disable();
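
For context, a hedged caller-side sketch of how the begin/end pair touched by
these hunks is used: the begin-side helper saves the IRQ state through the
flags pointer, and mem_cgroup_end_page_stat() (whose signature appears in the
hunk above) takes it back by value. The begin-side name and signature here are
assumptions about this kernel version's API, simplified for illustration.

	#include <linux/memcontrol.h>
	#include <linux/mm_types.h>

	static void update_page_stat_sketch(struct page *page)
	{
		struct mem_cgroup *memcg;
		bool locked;
		unsigned long flags;

		/*
		 * Assumed begin-side helper: may take memcg->move_lock and
		 * store the saved IRQ state into 'flags'.
		 */
		memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);

		/* ... adjust a per-memcg page statistic here ... */

		/* Drops the lock and restores IRQs only if they were taken. */
		mem_cgroup_end_page_stat(memcg, locked, flags);
	}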