sched/cgroup: Fix cgroup entity load tracking tear-down
author     Peter Zijlstra <peterz@infradead.org>
           Thu, 21 Jan 2016 21:24:16 +0000 (22:24 +0100)
committer  Ingo Molnar <mingo@kernel.org>
           Mon, 29 Feb 2016 08:41:50 +0000 (09:41 +0100)
When a cgroup's CPU runqueue is destroyed, it should remove its
remaining load accounting from its parent cgroup.

The current site for doing so is unsuited because it runs far too late and
is unordered against other cgroup removal (->css_free() itself would be
ordered, but we are additionally running from an RCU callback).

Put it in the ->css_offline() callback, which is the start of cgroup
destruction, right after the group has been made unavailable to
userspace. The ->css_offline() callbacks are called in hierarchical order
after the following v4.4 commit:

  aa226ff4a1ce ("cgroup: make sure a parent css isn't offlined before its children")
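
For context, the cpu controller's ->css_offline() hook lives in
kernel/sched/core.c and forwards straight to sched_offline_group(). A
minimal sketch of that pre-existing wiring (not part of this patch;
callback names as found in kernels of this era):

	static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
	{
		struct task_group *tg = css_tg(css);

		sched_offline_group(tg);
	}

	struct cgroup_subsys cpu_cgrp_subsys = {
		.css_alloc	= cpu_cgroup_css_alloc,
		.css_free	= cpu_cgroup_css_free,
		.css_offline	= cpu_cgroup_css_offline,
		/* ... remaining callbacks elided ... */
	};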

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20160121212416.GL6357@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9503d590e5ef5b81537947b9925ecfe689f72a91..ab814bf100e103a79b50eb516894378c0a6aa3b5 100644
@@ -7860,11 +7860,9 @@ void sched_destroy_group(struct task_group *tg)
 void sched_offline_group(struct task_group *tg)
 {
        unsigned long flags;
-       int i;
 
        /* end participation in shares distribution */
-       for_each_possible_cpu(i)
-               unregister_fair_sched_group(tg, i);
+       unregister_fair_sched_group(tg);
 
        spin_lock_irqsave(&task_group_lock, flags);
        list_del_rcu(&tg->list);
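
With the per-CPU loop moved into fair.c, the resulting
sched_offline_group() would read roughly as follows. This is a sketch
reconstructed from the context lines above; the list_del_rcu(&tg->siblings)
line is an assumption about the elided remainder of the function:

	void sched_offline_group(struct task_group *tg)
	{
		unsigned long flags;

		/* End participation in shares distribution. */
		unregister_fair_sched_group(tg);

		spin_lock_irqsave(&task_group_lock, flags);
		list_del_rcu(&tg->list);
		list_del_rcu(&tg->siblings);	/* assumed, not shown in this hunk */
		spin_unlock_irqrestore(&task_group_lock, flags);
	}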
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 56b7d4b839476b6ed1692e786abe9ed6cda64a5f..cce33032962423b31ca2637cd806d36efacd77ff 100644
@@ -8234,11 +8234,8 @@ void free_fair_sched_group(struct task_group *tg)
        for_each_possible_cpu(i) {
                if (tg->cfs_rq)
                        kfree(tg->cfs_rq[i]);
-               if (tg->se) {
-                       if (tg->se[i])
-                               remove_entity_load_avg(tg->se[i]);
+               if (tg->se)
                        kfree(tg->se[i]);
-               }
        }
 
        kfree(tg->cfs_rq);
@@ -8286,21 +8283,29 @@ err:
        return 0;
 }
 
-void unregister_fair_sched_group(struct task_group *tg, int cpu)
+void unregister_fair_sched_group(struct task_group *tg)
 {
-       struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
+       struct rq *rq;
+       int cpu;
 
-       /*
-       * Only empty task groups can be destroyed; so we can speculatively
-       * check on_list without danger of it being re-added.
-       */
-       if (!tg->cfs_rq[cpu]->on_list)
-               return;
+       for_each_possible_cpu(cpu) {
+               if (tg->se[cpu])
+                       remove_entity_load_avg(tg->se[cpu]);
 
-       raw_spin_lock_irqsave(&rq->lock, flags);
-       list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
-       raw_spin_unlock_irqrestore(&rq->lock, flags);
+               /*
+                * Only empty task groups can be destroyed; so we can speculatively
+                * check on_list without danger of it being re-added.
+                */
+               if (!tg->cfs_rq[cpu]->on_list)
+                       continue;
+
+               rq = cpu_rq(cpu);
+
+               raw_spin_lock_irqsave(&rq->lock, flags);
+               list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
+       }
 }
 
 void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
@@ -8382,7 +8387,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
        return 1;
 }
 
-void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
+void unregister_fair_sched_group(struct task_group *tg) { }
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 10f16374df7f3a3f0f3dc0eb281559cacd223311..30ea2d871ba75895cee163936785320fcf542212 100644
@@ -313,7 +313,7 @@ extern int tg_nop(struct task_group *tg, void *data);
 
 extern void free_fair_sched_group(struct task_group *tg);
 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
-extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
+extern void unregister_fair_sched_group(struct task_group *tg);
 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
                        struct sched_entity *se, int cpu,
                        struct sched_entity *parent);
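
Taken together, the teardown ordering these hunks establish can be
summarized as follows: a simplified sketch, relying on the hierarchical
->css_offline() ordering from aa226ff4a1ce:

	/*
	 * rmdir <cgroupfs>/cpu/<group>
	 *   -> ->css_offline()                    children offlined before parents
	 *        sched_offline_group(tg)
	 *          unregister_fair_sched_group(tg)
	 *            for each possible cpu:
	 *              remove_entity_load_avg(tg->se[cpu])     return load while the
	 *              list_del_leaf_cfs_rq(tg->cfs_rq[cpu])   parent is still online
	 *                                                      (only if on_list)
	 *   ... RCU grace period ...
	 *
	 *   -> ->css_free()
	 *        free_fair_sched_group(tg)         now plain kfree()s only,
	 *                                          no load-tracking work left
	 */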