sched: encapsulate priority changes in a sched_set_prio static function
author: Julien Desfossez <jdesfossez@efficios.com>
Mon, 16 May 2016 14:57:26 +0000 (10:57 -0400)
committer: Julien Desfossez <jdesfossez@efficios.com>
Mon, 27 Jun 2016 18:14:18 +0000 (14:14 -0400)
Currently, the priority of tasks is modified directly in the scheduling
functions. Encapsulate priority updates to enable instrumentation of
priority changes. This will enable analysis of real-time scheduling
delays per thread priority, which cannot be performed accurately if we
only trace the priority of the currently scheduled processes.

The call sites that modify the priority of a task are mostly system
calls: sched_setscheduler, sched_setattr, sched_process_fork and
set_user_nice. Priority can also be dynamically boosted through
priority inheritance of rt_mutex by rt_mutex_setprio.

Signed-off-by: Julien Desfossez <jdesfossez@efficios.com>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
include/linux/sched.h
kernel/sched/core.c

index 52c4847b05e2882a72d04c3c75fc4d55c2b4a6b9..48b35c03f6f8030e11f8ec236a636bdd6ed0eef5 100644 (file)
@@ -1409,7 +1409,8 @@ struct task_struct {
 #endif
        int on_rq;
 
-       int prio, static_prio, normal_prio;
+       int prio; /* Updated through sched_set_prio() */
+       int static_prio, normal_prio;
        unsigned int rt_priority;
        const struct sched_class *sched_class;
        struct sched_entity se;
index d1f7149f870439d65b9cfcfbc27d4160bbb1672f..6946b8f7ace47136d41f9b366bd8c96faee85cd6 100644 (file)
@@ -2230,6 +2230,11 @@ int sysctl_schedstats(struct ctl_table *table, int write,
 #endif
 #endif
 
+static void sched_set_prio(struct task_struct *p, int prio)
+{
+       p->prio = prio;
+}
+
 /*
  * fork()/clone()-time setup:
  */
@@ -2249,7 +2254,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
        /*
         * Make sure we do not leak PI boosting priority to the child.
         */
-       p->prio = current->normal_prio;
+       sched_set_prio(p, current->normal_prio);
 
        /*
         * Revert to default priority/policy on fork if requested.
@@ -2262,7 +2267,8 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
                } else if (PRIO_TO_NICE(p->static_prio) < 0)
                        p->static_prio = NICE_TO_PRIO(0);
 
-               p->prio = p->normal_prio = __normal_prio(p);
+               p->normal_prio = __normal_prio(p);
+               sched_set_prio(p, p->normal_prio);
                set_load_weight(p);
 
                /*
@@ -3477,7 +3483,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
                p->sched_class = &fair_sched_class;
        }
 
-       p->prio = prio;
+       sched_set_prio(p, prio);
 
        if (running)
                p->sched_class->set_curr_task(rq);
@@ -3524,7 +3530,7 @@ void set_user_nice(struct task_struct *p, long nice)
        p->static_prio = NICE_TO_PRIO(nice);
        set_load_weight(p);
        old_prio = p->prio;
-       p->prio = effective_prio(p);
+       sched_set_prio(p, effective_prio(p));
        delta = p->prio - old_prio;
 
        if (queued) {
@@ -3731,9 +3737,10 @@ static void __setscheduler(struct rq *rq, struct task_struct *p,
         * sched_setscheduler().
         */
        if (keep_boost)
-               p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+               sched_set_prio(p, rt_mutex_get_effective_prio(p,
+                                       normal_prio(p)));
        else
-               p->prio = normal_prio(p);
+               sched_set_prio(p, normal_prio(p));
 
        if (dl_prio(p->prio))
                p->sched_class = &dl_sched_class;
This page took 0.028293 seconds and 5 git commands to generate.