mm: remove rest of ACCESS_ONCE() usages
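ACCESS_ONCE() relies on a cast to a volatile-qualified type, which some GCC versions silently drop when the type is non-scalar, so the kernel deprecated it in favor of READ_ONCE()/WRITE_ONCE(). Every site converted below reads a scalar field (unsigned long or int), so each hunk is a direct one-to-one replacement with no behavioural change. As a rough illustration only (not part of this patch; the userspace READ_ONCE() stand-in is a simplified sketch of the real <linux/compiler.h> macro, and soft_limit_excess() here is a toy mock-up of the memcg helper):

/*
 * Simplified userspace sketch of what READ_ONCE() buys us: the volatile
 * access forces the compiler to emit exactly one load of the shared
 * value and forbids it from merging or re-reading that load, so a
 * concurrently updated limit is sampled once and used consistently.
 */
#include <stdio.h>

#define READ_ONCE(x)  (*(const volatile typeof(x) *)&(x))

static unsigned long soft_limit = 128;   /* stands in for memcg->soft_limit */

static unsigned long soft_limit_excess(unsigned long nr_pages)
{
        unsigned long limit = READ_ONCE(soft_limit);   /* single, untorn load */

        return nr_pages > limit ? nr_pages - limit : 0;
}

int main(void)
{
        printf("%lu\n", soft_limit_excess(200));       /* prints 72 */
        return 0;
}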
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 74a9641d8f9f136d7d3ca6f31e668e21b3b68b2b..14c2f2017e37cc405e52cb12bc30b128997f1f8e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -674,7 +674,7 @@ static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
 {
        unsigned long nr_pages = page_counter_read(&memcg->memory);
-       unsigned long soft_limit = ACCESS_ONCE(memcg->soft_limit);
+       unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
        unsigned long excess = 0;
 
        if (nr_pages > soft_limit)
@@ -1042,7 +1042,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
                        goto out_unlock;
 
                do {
-                       pos = ACCESS_ONCE(iter->position);
+                       pos = READ_ONCE(iter->position);
                        /*
                         * A racing update may change the position and
                         * put the last reference, hence css_tryget(),
@@ -1359,13 +1359,13 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
        unsigned long limit;
 
        count = page_counter_read(&memcg->memory);
-       limit = ACCESS_ONCE(memcg->memory.limit);
+       limit = READ_ONCE(memcg->memory.limit);
        if (count < limit)
                margin = limit - count;
 
        if (do_swap_account) {
                count = page_counter_read(&memcg->memsw);
-               limit = ACCESS_ONCE(memcg->memsw.limit);
+               limit = READ_ONCE(memcg->memsw.limit);
                if (count <= limit)
                        margin = min(margin, limit - count);
        }
@@ -2637,7 +2637,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
                return cachep;
 
        memcg = get_mem_cgroup_from_mm(current->mm);
-       kmemcg_id = ACCESS_ONCE(memcg->kmemcg_id);
+       kmemcg_id = READ_ONCE(memcg->kmemcg_id);
        if (kmemcg_id < 0)
                goto out;
 
@@ -5007,7 +5007,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
         * tunable will only affect upcoming migrations, not the current one.
         * So we need to save it, and keep it going.
         */
-       move_flags = ACCESS_ONCE(memcg->move_charge_at_immigrate);
+       move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
        if (move_flags) {
                struct mm_struct *mm;
                struct mem_cgroup *from = mem_cgroup_from_task(p);
@@ -5241,7 +5241,7 @@ static u64 memory_current_read(struct cgroup_subsys_state *css,
 static int memory_low_show(struct seq_file *m, void *v)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-       unsigned long low = ACCESS_ONCE(memcg->low);
+       unsigned long low = READ_ONCE(memcg->low);
 
        if (low == PAGE_COUNTER_MAX)
                seq_puts(m, "max\n");
@@ -5271,7 +5271,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of,
 static int memory_high_show(struct seq_file *m, void *v)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-       unsigned long high = ACCESS_ONCE(memcg->high);
+       unsigned long high = READ_ONCE(memcg->high);
 
        if (high == PAGE_COUNTER_MAX)
                seq_puts(m, "max\n");
@@ -5301,7 +5301,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
 static int memory_max_show(struct seq_file *m, void *v)
 {
        struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
-       unsigned long max = ACCESS_ONCE(memcg->memory.limit);
+       unsigned long max = READ_ONCE(memcg->memory.limit);
 
        if (max == PAGE_COUNTER_MAX)
                seq_puts(m, "max\n");