tracing: add sched_set_prio tracepoint
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8b489fcac37bd9d829439feb08d8bac1354c7e71..45fbaab4add7afc099028d1ea6f240a6d2393f14 100644
@@ -596,17 +596,8 @@ bool sched_can_stop_tick(struct rq *rq)
                return false;
 
        /*
-        * FIFO realtime policy runs the highest priority task (after DEADLINE).
-        * Other runnable tasks are of a lower priority. The scheduler tick
-        * isn't needed.
-        */
-       fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
-       if (fifo_nr_running)
-               return true;
-
-       /*
-        * Round-robin realtime tasks time slice with other tasks at the same
-        * realtime priority.
+        * If there are more than one RR tasks, we need the tick to effect the
+        * actual RR behaviour.
         */
        if (rq->rt.rr_nr_running) {
                if (rq->rt.rr_nr_running == 1)
@@ -615,8 +606,20 @@ bool sched_can_stop_tick(struct rq *rq)
                        return false;
        }
 
-       /* Normal multitasking need periodic preemption checks */
-       if (rq->cfs.nr_running > 1)
+       /*
+        * If there's no RR tasks, but FIFO tasks, we can skip the tick, no
+        * forced preemption between FIFO tasks.
+        */
+       fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
+       if (fifo_nr_running)
+               return true;
+
+       /*
+        * If there are no DL,RR/FIFO tasks, there must only be CFS tasks left;
+        * if there's more than one we need the tick for involuntary
+        * preemption.
+        */
+       if (rq->nr_running > 1)
                return false;
 
        return true;
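
Taken together, the two hunks above reorder the checks in sched_can_stop_tick(): SCHED_RR is handled first, SCHED_FIFO second, and the final check now looks at rq->nr_running rather than rq->cfs.nr_running. A sketch of the resulting function body follows, for reference only; the SCHED_DEADLINE check at the top sits outside the hunk context and is assumed here.

bool sched_can_stop_tick(struct rq *rq)
{
	int fifo_nr_running;

	/* Assumed, not shown in the hunks: deadline tasks always need the tick. */
	if (rq->dl.dl_nr_running)
		return false;

	/* More than one SCHED_RR task needs the tick to drive time slicing. */
	if (rq->rt.rr_nr_running) {
		if (rq->rt.rr_nr_running == 1)
			return true;
		else
			return false;
	}

	/*
	 * No RR tasks: any remaining realtime tasks are FIFO, and FIFO tasks
	 * are never forcibly preempted, so the tick can stop.
	 */
	fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
	if (fifo_nr_running)
		return true;

	/* Only CFS tasks are left; more than one needs the tick to preempt. */
	if (rq->nr_running > 1)
		return false;

	return true;
}
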
@@ -2227,6 +2230,12 @@ int sysctl_schedstats(struct ctl_table *table, int write,
 #endif
 #endif
 
+static void sched_set_prio(struct task_struct *p, int prio)
+{
+       trace_sched_set_prio(p, prio);
+       p->prio = prio;
+}
+
 /*
  * fork()/clone()-time setup:
  */
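
The new sched_set_prio() helper funnels every p->prio assignment through one place and emits trace_sched_set_prio() before updating the task. The tracepoint declaration itself is not part of this file's diff (it would normally live in include/trace/events/sched.h); the following is only a sketch of what such a TRACE_EVENT could look like, with the field names and printk format chosen here for illustration rather than taken from the patch.

TRACE_EVENT(sched_set_prio,

	TP_PROTO(struct task_struct *p, int prio),

	TP_ARGS(p, prio),

	TP_STRUCT__entry(
		__array(char,	comm,	TASK_COMM_LEN)
		__field(pid_t,	pid)
		__field(int,	oldprio)
		__field(int,	newprio)
	),

	TP_fast_assign(
		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
		__entry->pid		= p->pid;
		__entry->oldprio	= p->prio;	/* value before the change */
		__entry->newprio	= prio;		/* value being set */
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->pid,
		  __entry->oldprio, __entry->newprio)
);
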
@@ -2246,7 +2255,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
        /*
         * Make sure we do not leak PI boosting priority to the child.
         */
-       p->prio = current->normal_prio;
+       sched_set_prio(p, current->normal_prio);
 
        /*
         * Revert to default priority/policy on fork if requested.
@@ -2259,7 +2268,8 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
                } else if (PRIO_TO_NICE(p->static_prio) < 0)
                        p->static_prio = NICE_TO_PRIO(0);
 
-               p->prio = p->normal_prio = __normal_prio(p);
+               p->normal_prio = __normal_prio(p);
+               sched_set_prio(p, p->normal_prio);
                set_load_weight(p);
 
                /*
@@ -3474,7 +3484,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
                p->sched_class = &fair_sched_class;
        }
 
-       p->prio = prio;
+       sched_set_prio(p, prio);
 
        if (running)
                p->sched_class->set_curr_task(rq);
@@ -3521,7 +3531,7 @@ void set_user_nice(struct task_struct *p, long nice)
        p->static_prio = NICE_TO_PRIO(nice);
        set_load_weight(p);
        old_prio = p->prio;
-       p->prio = effective_prio(p);
+       sched_set_prio(p, effective_prio(p));
        delta = p->prio - old_prio;
 
        if (queued) {
@@ -3728,9 +3738,10 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
         * sched_setscheduler().
         */
        if (keep_boost)
-               p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+               sched_set_prio(p, rt_mutex_get_effective_prio(p,
+                                       normal_prio(p)));
        else
-               p->prio = normal_prio(p);
+               sched_set_prio(p, normal_prio(p));
 
        if (dl_prio(p->prio))
                p->sched_class = &dl_sched_class;
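
With the call sites above converted, every priority change (fork-time inheritance and reset, set_user_nice(), rt_mutex PI boosting, and __setscheduler()) now flows through sched_set_prio(), so a single tracepoint observes them all. Purely as a usage illustration, assuming the tracepoint sketched earlier exists and is exported to modules, a probe could be attached from a module via the helper that the TRACE_EVENT machinery generates:

#include <linux/module.h>
#include <trace/events/sched.h>

/* Illustrative probe: log each priority change as it happens. */
static void probe_sched_set_prio(void *data, struct task_struct *p, int prio)
{
	pr_debug("sched_set_prio: comm=%s pid=%d prio %d -> %d\n",
		 p->comm, p->pid, p->prio, prio);
}

static int __init prio_probe_init(void)
{
	/* register_trace_sched_set_prio() is generated from the tracepoint. */
	return register_trace_sched_set_prio(probe_sched_set_prio, NULL);
}

static void __exit prio_probe_exit(void)
{
	unregister_trace_sched_set_prio(probe_sched_set_prio, NULL);
	tracepoint_synchronize_unregister();
}

module_init(prio_probe_init);
module_exit(prio_probe_exit);
MODULE_LICENSE("GPL");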