sched: encapsulate priority changes in a sched_set_prio static function
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d8465eeab8b3d7878866dc1aee7a87bfcb3f1b5e..6946b8f7ace47136d41f9b366bd8c96faee85cd6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -321,6 +321,24 @@ static inline void init_hrtick(void)
 }
 #endif /* CONFIG_SCHED_HRTICK */
 
+/*
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#define fetch_or(ptr, mask)                                            \
+       ({                                                              \
+               typeof(ptr) _ptr = (ptr);                               \
+               typeof(mask) _mask = (mask);                            \
+               typeof(*_ptr) _old, _val = *_ptr;                       \
+                                                                       \
+               for (;;) {                                              \
+                       _old = cmpxchg(_ptr, _val, _val | _mask);       \
+                       if (_old == _val)                               \
+                               break;                                  \
+                       _val = _old;                                    \
+               }                                                       \
+       _old;                                                           \
+})
+
 #if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
 /*
  * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
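The fetch_or() macro added above atomically ORs `mask` into `*ptr` and
returns the value observed before the OR, retrying the cmpxchg() until
it lands on an unchanged word. A minimal userspace analogue using C11
atomics (an illustrative sketch only: the kernel macro is type-generic
and built on the kernel's cmpxchg(), not on <stdatomic.h>):

	#include <stdatomic.h>

	static unsigned int fetch_or_uint(_Atomic unsigned int *ptr,
					  unsigned int mask)
	{
		unsigned int old = atomic_load(ptr);

		/* On failure, compare_exchange_weak reloads the current
		 * value into `old`, mirroring the macro's `_val = _old;`
		 * retry step. */
		while (!atomic_compare_exchange_weak(ptr, &old, old | mask))
			;
		return old;
	}

C11 also offers atomic_fetch_or() directly; the explicit loop is shown
here only because it matches the structure of the macro above.
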
@@ -578,17 +596,8 @@ bool sched_can_stop_tick(struct rq *rq)
                return false;
 
        /*
-        * FIFO realtime policy runs the highest priority task (after DEADLINE).
-        * Other runnable tasks are of a lower priority. The scheduler tick
-        * isn't needed.
-        */
-       fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
-       if (fifo_nr_running)
-               return true;
-
-       /*
-        * Round-robin realtime tasks time slice with other tasks at the same
-        * realtime priority.
+        * If there is more than one RR task, we need the tick to effect the
+        * actual RR behaviour.
         */
        if (rq->rt.rr_nr_running) {
                if (rq->rt.rr_nr_running == 1)
@@ -597,8 +606,20 @@ bool sched_can_stop_tick(struct rq *rq)
                        return false;
        }
 
-       /* Normal multitasking need periodic preemption checks */
-       if (rq->cfs.nr_running > 1)
+       /*
+        * If there are no RR tasks but there are FIFO tasks, we can skip
+        * the tick: there is no forced preemption between FIFO tasks.
+        */
+       fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
+       if (fifo_nr_running)
+               return true;
+
+       /*
+        * If there are no DL, RR, or FIFO tasks, only CFS tasks can be
+        * left; if there is more than one, we need the tick for involuntary
+        * preemption.
+        */
+       if (rq->nr_running > 1)
                return false;
 
        return true;
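Taken together, the two hunks above reorder the checks in
sched_can_stop_tick(): RR is now tested before FIFO, and the final test
uses rq->nr_running instead of rq->cfs.nr_running, since once DL, RR
and FIFO have been ruled out only CFS tasks can remain. A condensed
restatement of the post-patch decision order (a sketch with the
counters passed as plain integers; the SCHED_DEADLINE check earlier in
the function is omitted):

	static bool can_stop_tick_sketch(unsigned int rr_nr_running,
					 unsigned int fifo_nr_running,
					 unsigned int nr_running)
	{
		/* RR tasks time-slice each other: more than one
		 * needs the tick. */
		if (rr_nr_running)
			return rr_nr_running == 1;

		/* FIFO tasks are never preempted by the tick. */
		if (fifo_nr_running)
			return true;

		/* Only CFS tasks remain: a single runnable task
		 * needs no involuntary preemption. */
		return nr_running <= 1;
	}
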
@@ -2209,6 +2230,11 @@ int sysctl_schedstats(struct ctl_table *table, int write,
 #endif
 #endif
 
+static void sched_set_prio(struct task_struct *p, int prio)
+{
+       p->prio = prio;
+}
+
 /*
  * fork()/clone()-time setup:
  */
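sched_set_prio() is deliberately trivial: the point of the patch is
that every write to p->prio now funnels through one choke point. A
hypothetical instrumented variant, to show the kind of change the
encapsulation enables (the tracepoint name below is invented, not part
of this patch):

	static void sched_set_prio(struct task_struct *p, int prio)
	{
		/* hypothetical: one place to hook tracing or assertions */
		trace_sched_update_prio(p, prio);
		p->prio = prio;
	}

The hunks that follow are the mechanical substitution of each
open-coded `p->prio = ...` assignment with a call to this helper.
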
@@ -2228,7 +2254,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
        /*
         * Make sure we do not leak PI boosting priority to the child.
         */
-       p->prio = current->normal_prio;
+       sched_set_prio(p, current->normal_prio);
 
        /*
         * Revert to default priority/policy on fork if requested.
@@ -2241,7 +2267,8 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
                } else if (PRIO_TO_NICE(p->static_prio) < 0)
                        p->static_prio = NICE_TO_PRIO(0);
 
-               p->prio = p->normal_prio = __normal_prio(p);
+               p->normal_prio = __normal_prio(p);
+               sched_set_prio(p, p->normal_prio);
                set_load_weight(p);
 
                /*
@@ -3456,7 +3483,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
                p->sched_class = &fair_sched_class;
        }
 
-       p->prio = prio;
+       sched_set_prio(p, prio);
 
        if (running)
                p->sched_class->set_curr_task(rq);
@@ -3503,7 +3530,7 @@ void set_user_nice(struct task_struct *p, long nice)
        p->static_prio = NICE_TO_PRIO(nice);
        set_load_weight(p);
        old_prio = p->prio;
-       p->prio = effective_prio(p);
+       sched_set_prio(p, effective_prio(p));
        delta = p->prio - old_prio;
 
        if (queued) {
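The delta computed above drives the requeue and resched decision in
set_user_nice(). A worked example using the kernel's nice-to-priority
mapping, where NICE_TO_PRIO(nice) expands to (nice) + MAX_RT_PRIO + 20,
i.e. nice + 120:

	int old_prio = NICE_TO_PRIO(0);		/* 120 */
	int new_prio = NICE_TO_PRIO(-5);	/* 115 */
	int delta    = new_prio - old_prio;	/* -5 */

A negative delta means the task became more urgent (numerically lower
priority), so the CPU's current task may need to be rescheduled.
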
@@ -3710,9 +3737,10 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
         * sched_setscheduler().
         */
        if (keep_boost)
-               p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+               sched_set_prio(p, rt_mutex_get_effective_prio(p,
+                                       normal_prio(p)));
        else
-               p->prio = normal_prio(p);
+               sched_set_prio(p, normal_prio(p));
 
        if (dl_prio(p->prio))
                p->sched_class = &dl_sched_class;
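In the keep_boost path above, rt_mutex_get_effective_prio() keeps the
task from dropping below a priority-inheritance boost it currently
holds: the result is the more urgent of the new normal priority and the
top boosting waiter's priority. Conceptually (a sketch with assumed
parameter names; lower numeric value means more urgent):

	static int effective_prio_sketch(int normal_prio, int top_waiter_prio)
	{
		return normal_prio < top_waiter_prio ? normal_prio
						     : top_waiter_prio;
	}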