sched: encapsulate priority changes in a sched_set_prio static function
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 44db0fffa8be333c47d6aae90c3219a335cd5c22..6946b8f7ace47136d41f9b366bd8c96faee85cd6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -321,6 +321,24 @@ static inline void init_hrtick(void)
 }
 #endif /* CONFIG_SCHED_HRTICK */
 
+/*
+ * cmpxchg-based fetch_or(), a macro so it works for different integer types
+ */
+#define fetch_or(ptr, mask)                                            \
+       ({                                                              \
+               typeof(ptr) _ptr = (ptr);                               \
+               typeof(mask) _mask = (mask);                            \
+               typeof(*_ptr) _old, _val = *_ptr;                       \
+                                                                       \
+               for (;;) {                                              \
+                       _old = cmpxchg(_ptr, _val, _val | _mask);       \
+                       if (_old == _val)                               \
+                               break;                                  \
+                       _val = _old;                                    \
+               }                                                       \
+       _old;                                                           \
+})
+
 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 /*
  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
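For orientation, here is a minimal userspace model of the fetch_or() macro above, using C11 atomics in place of the kernel's cmpxchg(); the function and variable names are illustrative only:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Userspace model of the cmpxchg-based fetch_or: atomically OR
     * 'mask' into '*ptr' and return the value seen before the OR. */
    static unsigned int fetch_or_u32(_Atomic unsigned int *ptr, unsigned int mask)
    {
            unsigned int old = atomic_load(ptr);

            /* On failure the CAS refreshes 'old', and 'old | mask' is
             * recomputed on the next iteration, exactly like the
             * _val = _old retry in the kernel macro. */
            while (!atomic_compare_exchange_weak(ptr, &old, old | mask))
                    ;
            return old;
    }

    int main(void)
    {
            _Atomic unsigned int flags = 0x1;
            unsigned int prev = fetch_or_u32(&flags, 0x4);

            printf("prev=0x%x now=0x%x\n", prev, atomic_load(&flags));
            /* prints: prev=0x1 now=0x5 */
            return 0;
    }
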
@@ -578,17 +596,8 @@ bool sched_can_stop_tick(struct rq *rq)
                return false;
 
        /*
-        * FIFO realtime policy runs the highest priority task (after DEADLINE).
-        * Other runnable tasks are of a lower priority. The scheduler tick
-        * isn't needed.
-        */
-       fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
-       if (fifo_nr_running)
-               return true;
-
-       /*
-        * Round-robin realtime tasks time slice with other tasks at the same
-        * realtime priority.
+        * If there is more than one RR task, we need the tick to effect the
+        * actual RR behaviour.
         */
        if (rq->rt.rr_nr_running) {
                if (rq->rt.rr_nr_running == 1)
@@ -597,8 +606,20 @@ bool sched_can_stop_tick(struct rq *rq)
                        return false;
        }
 
-       /* Normal multitasking need periodic preemption checks */
-       if (rq->cfs.nr_running > 1)
+       /*
+        * If there are no RR tasks but there are FIFO tasks, we can skip the
+        * tick: there is no forced preemption between FIFO tasks.
+        */
+       fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
+       if (fifo_nr_running)
+               return true;
+
+       /*
+        * If there are no DL, RR, or FIFO tasks, only CFS tasks can be left;
+        * if there is more than one we need the tick for involuntary
+        * preemption.
+        */
+       if (rq->nr_running > 1)
                return false;
 
        return true;
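The reordered checks amount to the following decision ladder (a standalone sketch, not the kernel function; struct rq_counts stands in for the struct rq fields used above):

    struct rq_counts {
            unsigned int rt_nr_running;     /* all runnable RT tasks (FIFO + RR) */
            unsigned int rr_nr_running;     /* runnable SCHED_RR tasks only */
            unsigned int nr_running;        /* every runnable task on the rq */
    };

    /* Returns nonzero when the periodic tick may be stopped. */
    static int can_stop_tick(const struct rq_counts *rq)
    {
            /* Two or more RR tasks: the tick drives their time slicing. */
            if (rq->rr_nr_running > 1)
                    return 0;

            /* FIFO tasks present (and at most one RR task): FIFO tasks are
             * never forcibly preempted by each other, so no tick is needed. */
            if (rq->rt_nr_running - rq->rr_nr_running)
                    return 1;

            /* Only CFS tasks remain; a second one needs the tick for
             * involuntary preemption. */
            return rq->nr_running <= 1;
    }
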
@@ -2209,6 +2230,11 @@ int sysctl_schedstats(struct ctl_table *table, int write,
 #endif
 #endif
 
+static void sched_set_prio(struct task_struct *p, int prio)
+{
+       p->prio = prio;
+}
+
 /*
  * fork()/clone()-time setup:
  */
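sched_set_prio() is now the single funnel through which p->prio is written, which gives later patches one place to hook. A hypothetical instrumented variant (trace_sched_update_prio is an invented name, not an existing tracepoint):

    static void sched_set_prio(struct task_struct *p, int prio)
    {
            trace_sched_update_prio(p, prio);       /* hypothetical tracepoint */
            p->prio = prio;
    }
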
@@ -2228,7 +2254,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
        /*
         * Make sure we do not leak PI boosting priority to the child.
         */
-       p->prio = current->normal_prio;
+       sched_set_prio(p, current->normal_prio);
 
        /*
         * Revert to default priority/policy on fork if requested.
@@ -2241,7 +2267,8 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
                } else if (PRIO_TO_NICE(p->static_prio) < 0)
                        p->static_prio = NICE_TO_PRIO(0);
 
-               p->prio = p->normal_prio = __normal_prio(p);
+               p->normal_prio = __normal_prio(p);
+               sched_set_prio(p, p->normal_prio);
                set_load_weight(p);
 
                /*
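The static_prio clamp above resets any negative nice value back to nice 0. For reference, the kernel's nice-to-priority mapping (these macros live in the kernel's prio.h; values shown assume MAX_RT_PRIO == 100):

    #define MAX_RT_PRIO             100
    #define NICE_TO_PRIO(nice)      ((nice) + MAX_RT_PRIO + 20)
    #define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)

    /* NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120, NICE_TO_PRIO(19) == 139,
     * so PRIO_TO_NICE(p->static_prio) < 0 means the task was niced below 0. */
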
@@ -3456,7 +3483,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
                p->sched_class = &fair_sched_class;
        }
 
-       p->prio = prio;
+       sched_set_prio(p, prio);
 
        if (running)
                p->sched_class->set_curr_task(rq);
@@ -3503,7 +3530,7 @@ void set_user_nice(struct task_struct *p, long nice)
        p->static_prio = NICE_TO_PRIO(nice);
        set_load_weight(p);
        old_prio = p->prio;
-       p->prio = effective_prio(p);
+       sched_set_prio(p, effective_prio(p));
        delta = p->prio - old_prio;
 
        if (queued) {
@@ -3710,9 +3737,10 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
         * sched_setscheduler().
         */
        if (keep_boost)
-               p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+               sched_set_prio(p, rt_mutex_get_effective_prio(p,
+                                       normal_prio(p)));
        else
-               p->prio = normal_prio(p);
+               sched_set_prio(p, normal_prio(p));
 
        if (dl_prio(p->prio))
                p->sched_class = &dl_sched_class;
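The class choice at the end of this hunk keys off fixed priority bands; the predicates below mirror the kernel's dl_prio()/rt_prio() helpers (constants as in the kernel's prio.h):

    #define MAX_DL_PRIO     0
    #define MAX_RT_PRIO     100

    static inline int dl_prio(int prio) { return prio < MAX_DL_PRIO; }  /* deadline: prio < 0 */
    static inline int rt_prio(int prio) { return prio < MAX_RT_PRIO; }  /* realtime: 0..99 */
    /* Everything else (100..139, i.e. nice -20..19) goes to the fair class. */
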
@@ -5371,6 +5399,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
        case CPU_UP_PREPARE:
                rq->calc_load_update = calc_load_update;
+               account_reset_rq(rq);
                break;
 
        case CPU_ONLINE:
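account_reset_rq() is a new helper defined in kernel/sched/sched.h by a companion hunk not shown here; a sketch of its likely shape, with field names assumed from the corresponding accounting config options:

    static inline void account_reset_rq(struct rq *rq)
    {
    #ifdef CONFIG_IRQ_TIME_ACCOUNTING
            rq->prev_irq_time = 0;          /* irq-time accounting baseline */
    #endif
    #ifdef CONFIG_PARAVIRT
            rq->prev_steal_time = 0;        /* steal-time baseline */
    #endif
    #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
            rq->prev_steal_time_rq = 0;     /* steal time already folded into rq clock */
    #endif
    }
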
@@ -7537,7 +7566,7 @@ void set_curr_task(int cpu, struct task_struct *p)
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
-static void free_sched_group(struct task_group *tg)
+static void sched_free_group(struct task_group *tg)
 {
        free_fair_sched_group(tg);
        free_rt_sched_group(tg);
@@ -7563,7 +7592,7 @@ struct task_group *sched_create_group(struct task_group *parent)
        return tg;
 
 err:
-       free_sched_group(tg);
+       sched_free_group(tg);
        return ERR_PTR(-ENOMEM);
 }
 
@@ -7583,17 +7612,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
 }
 
 /* rcu callback to free various structures associated with a task group */
-static void free_sched_group_rcu(struct rcu_head *rhp)
+static void sched_free_group_rcu(struct rcu_head *rhp)
 {
        /* now it should be safe to free those cfs_rqs */
-       free_sched_group(container_of(rhp, struct task_group, rcu));
+       sched_free_group(container_of(rhp, struct task_group, rcu));
 }
 
-/* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
 {
        /* wait for possible concurrent references to cfs_rqs complete */
-       call_rcu(&tg->rcu, free_sched_group_rcu);
+       call_rcu(&tg->rcu, sched_free_group_rcu);
 }
 
 void sched_offline_group(struct task_group *tg)
@@ -8052,31 +8080,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        if (IS_ERR(tg))
                return ERR_PTR(-ENOMEM);
 
+       sched_online_group(tg, parent);
+
        return &tg->css;
 }
 
-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
 {
        struct task_group *tg = css_tg(css);
-       struct task_group *parent = css_tg(css->parent);
 
-       if (parent)
-               sched_online_group(tg, parent);
-       return 0;
+       sched_offline_group(tg);
 }
 
 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 {
        struct task_group *tg = css_tg(css);
 
-       sched_destroy_group(tg);
-}
-
-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
-{
-       struct task_group *tg = css_tg(css);
-
-       sched_offline_group(tg);
+       /*
+        * Relies on the RCU grace period between css_released() and css_free().
+        */
+       sched_free_group(tg);
 }
 
 static void cpu_cgroup_fork(struct task_struct *task)
@@ -8436,9 +8459,8 @@ static struct cftype cpu_files[] = {
 
 struct cgroup_subsys cpu_cgrp_subsys = {
        .css_alloc      = cpu_cgroup_css_alloc,
+       .css_released   = cpu_cgroup_css_released,
        .css_free       = cpu_cgroup_css_free,
-       .css_online     = cpu_cgroup_css_online,
-       .css_offline    = cpu_cgroup_css_offline,
        .fork           = cpu_cgroup_fork,
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
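Taken together, the cgroup hunks move the task_group through a simpler lifecycle; a descriptive summary of the new ordering:

    /*
     * Before: css_alloc -> css_online -> ... -> css_offline -> css_free
     *         (css_free called sched_destroy_group(), freeing via call_rcu())
     *
     * After:  css_alloc (the group goes online immediately)
     *         -> css_released (the group is taken offline)
     *         -> RCU grace period, guaranteed by the cgroup core
     *         -> css_free (plain sched_free_group(), no further RCU needed)
     */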