sched: Use TASK_WAKING for fork wakeups
diff --git a/kernel/sched.c b/kernel/sched.c
index fd05861b2111005a5a88b386d7d77ee036f5e160..1672823aabfe15052df86101528e68afccbcd012 100644
@@ -26,6 +26,8 @@
  *              Thomas Gleixner, Mike Kravetz
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/nmi.h>
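
With pr_fmt() defined before the includes, every pr_*() call in this file picks up a "sched: " prefix automatically (KBUILD_MODNAME expanding to "sched" for the built-in sched.o object is an assumption about the usual kbuild naming). A sketch of the effect on one of the calls converted further down:

    pr_err("BUG: scheduling while atomic: %s/%d/0x%08x\n", ...);
    /* printed as: "sched: BUG: scheduling while atomic: ..." */
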
@@ -141,7 +143,7 @@ struct rt_prio_array {
 
 struct rt_bandwidth {
        /* nests inside the rq lock: */
-       spinlock_t              rt_runtime_lock;
+       raw_spinlock_t          rt_runtime_lock;
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
@@ -178,7 +180,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;
 
-       spin_lock_init(&rt_b->rt_runtime_lock);
+       raw_spin_lock_init(&rt_b->rt_runtime_lock);
 
        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -200,7 +202,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
        if (hrtimer_active(&rt_b->rt_period_timer))
                return;
 
-       spin_lock(&rt_b->rt_runtime_lock);
+       raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                unsigned long delta;
                ktime_t soft, hard;
@@ -217,7 +219,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
                __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                                HRTIMER_MODE_ABS_PINNED, 0);
        }
-       spin_unlock(&rt_b->rt_runtime_lock);
+       raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -470,7 +472,7 @@ struct rt_rq {
        u64 rt_time;
        u64 rt_runtime;
        /* Nests inside the rq lock: */
-       spinlock_t rt_runtime_lock;
+       raw_spinlock_t rt_runtime_lock;
 
 #ifdef CONFIG_RT_GROUP_SCHED
        unsigned long rt_nr_boosted;
@@ -525,7 +527,7 @@ static struct root_domain def_root_domain;
  */
 struct rq {
        /* runqueue lock: */
-       spinlock_t lock;
+       raw_spinlock_t lock;
 
        /*
         * nr_running and cpu_load should be in the same cacheline because
@@ -685,7 +687,7 @@ inline void update_rq_clock(struct rq *rq)
  */
 int runqueue_is_locked(int cpu)
 {
-       return spin_is_locked(&cpu_rq(cpu)->lock);
+       return raw_spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
@@ -893,7 +895,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
         */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
 
-       spin_unlock_irq(&rq->lock);
+       raw_spin_unlock_irq(&rq->lock);
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
@@ -917,9 +919,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
        next->oncpu = 1;
 #endif
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-       spin_unlock_irq(&rq->lock);
+       raw_spin_unlock_irq(&rq->lock);
 #else
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 #endif
 }
 
@@ -949,10 +951,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
 {
        for (;;) {
                struct rq *rq = task_rq(p);
-               spin_lock(&rq->lock);
+               raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
-               spin_unlock(&rq->lock);
+               raw_spin_unlock(&rq->lock);
        }
 }
 
@@ -969,10 +971,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        for (;;) {
                local_irq_save(*flags);
                rq = task_rq(p);
-               spin_lock(&rq->lock);
+               raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
-               spin_unlock_irqrestore(&rq->lock, *flags);
+               raw_spin_unlock_irqrestore(&rq->lock, *flags);
        }
 }
 
@@ -981,19 +983,19 @@ void task_rq_unlock_wait(struct task_struct *p)
        struct rq *rq = task_rq(p);
 
        smp_mb(); /* spin-unlock-wait is not a full memory barrier */
-       spin_unlock_wait(&rq->lock);
+       raw_spin_unlock_wait(&rq->lock);
 }
 
 static void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
 {
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 }
 
 static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
        __releases(rq->lock)
 {
-       spin_unlock_irqrestore(&rq->lock, *flags);
+       raw_spin_unlock_irqrestore(&rq->lock, *flags);
 }
 
 /*
@@ -1006,7 +1008,7 @@ static struct rq *this_rq_lock(void)
 
        local_irq_disable();
        rq = this_rq();
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
 
        return rq;
 }
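
The spinlock_t -> raw_spinlock_t conversions above (and throughout the rest of the file) all follow the same idiom; a minimal sketch of the converted pattern, with a hypothetical lock name and kernel context assumed -- raw_spinlock_t remains a true busy-waiting lock even in configurations where spinlock_t may become a sleeping lock, which is why the runqueue and rt_runtime locks use it:

    static DEFINE_RAW_SPINLOCK(example_lock);      /* hypothetical lock */

    static void example_critical_section(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&example_lock, flags);
            /* work that must never sleep or re-enable interrupts */
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }
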
@@ -1053,10 +1055,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
        WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
 
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        rq->curr->sched_class->task_tick(rq, rq->curr, 1);
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 
        return HRTIMER_NORESTART;
 }
@@ -1069,10 +1071,10 @@ static void __hrtick_start(void *arg)
 {
        struct rq *rq = arg;
 
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
        hrtimer_restart(&rq->hrtick_timer);
        rq->hrtick_csd_pending = 0;
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 }
 
 /*
@@ -1179,7 +1181,7 @@ static void resched_task(struct task_struct *p)
 {
        int cpu;
 
-       assert_spin_locked(&task_rq(p)->lock);
+       assert_raw_spin_locked(&task_rq(p)->lock);
 
        if (test_tsk_need_resched(p))
                return;
@@ -1201,10 +1203,10 @@ static void resched_cpu(int cpu)
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
-       if (!spin_trylock_irqsave(&rq->lock, flags))
+       if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_task(cpu_curr(cpu));
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -1273,7 +1275,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 #else /* !CONFIG_SMP */
 static void resched_task(struct task_struct *p)
 {
-       assert_spin_locked(&task_rq(p)->lock);
+       assert_raw_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
 }
 
@@ -1600,11 +1602,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
                struct rq *rq = cpu_rq(cpu);
                unsigned long flags;
 
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
                tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
                __set_se_shares(tg->se[cpu], shares);
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
 }
 
@@ -1706,9 +1708,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
        if (root_task_group_empty())
                return;
 
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
        update_shares(sd);
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
 }
 
 static void update_h_load(long cpu)
@@ -1748,7 +1750,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
        __acquires(busiest->lock)
        __acquires(this_rq->lock)
 {
-       spin_unlock(&this_rq->lock);
+       raw_spin_unlock(&this_rq->lock);
        double_rq_lock(this_rq, busiest);
 
        return 1;
@@ -1769,14 +1771,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
        int ret = 0;
 
-       if (unlikely(!spin_trylock(&busiest->lock))) {
+       if (unlikely(!raw_spin_trylock(&busiest->lock))) {
                if (busiest < this_rq) {
-                       spin_unlock(&this_rq->lock);
-                       spin_lock(&busiest->lock);
-                       spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_unlock(&this_rq->lock);
+                       raw_spin_lock(&busiest->lock);
+                       raw_spin_lock_nested(&this_rq->lock,
+                                             SINGLE_DEPTH_NESTING);
                        ret = 1;
                } else
-                       spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock_nested(&busiest->lock,
+                                             SINGLE_DEPTH_NESTING);
        }
        return ret;
 }
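
_double_lock_balance() above avoids ABBA deadlocks by always taking the lower-addressed runqueue lock first when it has to drop and re-acquire; a generic sketch of that address-ordering idiom (hypothetical helper, mirroring double_rq_lock() further down):

    /* acquire two rq locks in a globally consistent (address) order */
    static void lock_two_rq_locks(raw_spinlock_t *a, raw_spinlock_t *b)
    {
            if (a == b) {
                    raw_spin_lock(a);
            } else if (a < b) {
                    raw_spin_lock(a);
                    raw_spin_lock_nested(b, SINGLE_DEPTH_NESTING);
            } else {
                    raw_spin_lock(b);
                    raw_spin_lock_nested(a, SINGLE_DEPTH_NESTING);
            }
    }
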
@@ -1790,7 +1794,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 {
        if (unlikely(!irqs_disabled())) {
                /* printk() doesn't work good under rq->lock */
-               spin_unlock(&this_rq->lock);
+               raw_spin_unlock(&this_rq->lock);
                BUG_ON(1);
        }
 
@@ -1800,7 +1804,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
        __releases(busiest->lock)
 {
-       spin_unlock(&busiest->lock);
+       raw_spin_unlock(&busiest->lock);
        lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
 }
 #endif
@@ -2023,13 +2027,13 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
                return;
        }
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
        update_rq_clock(rq);
        set_task_cpu(p, cpu);
        p->cpus_allowed = cpumask_of_cpu(cpu);
        p->rt.nr_cpus_allowed = 1;
        p->flags |= PF_THREAD_BOUND;
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -2042,6 +2046,9 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 {
        s64 delta;
 
+       if (p->sched_class != &fair_sched_class)
+               return 0;
+
        /*
         * Buddy candidates are cache hot:
         */
@@ -2050,9 +2057,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
                         &p->se == cfs_rq_of(&p->se)->last))
                return 1;
 
-       if (p->sched_class != &fair_sched_class)
-               return 0;
-
        if (sysctl_sched_migration_cost == -1)
                return 1;
        if (sysctl_sched_migration_cost == 0)
@@ -2536,14 +2540,6 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_PREEMPT_NOTIFIERS
        INIT_HLIST_HEAD(&p->preempt_notifiers);
 #endif
-
-       /*
-        * We mark the process as running here, but have not actually
-        * inserted it onto the runqueue yet. This guarantees that
-        * nobody will actually run it, and a signal or other external
-        * event cannot wake it up and insert it on the runqueue either.
-        */
-       p->state = TASK_RUNNING;
 }
 
 /*
@@ -2554,6 +2550,12 @@ void sched_fork(struct task_struct *p, int clone_flags)
        int cpu = get_cpu();
 
        __sched_fork(p);
+       /*
+        * We mark the process as waking here. This guarantees that
+        * nobody will actually run it, and a signal or other external
+        * event cannot wake it up and insert it on the runqueue either.
+        */
+       p->state = TASK_WAKING;
 
        /*
         * Revert to default priority/policy on fork if requested.
@@ -2622,7 +2624,8 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
        struct rq *rq;
 
        rq = task_rq_lock(p, &flags);
-       BUG_ON(p->state != TASK_RUNNING);
+       BUG_ON(p->state != TASK_WAKING);
+       p->state = TASK_RUNNING;
        update_rq_clock(rq);
        activate_task(rq, p, 0);
        trace_sched_wakeup_new(rq, p, 1);
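
The core of this change: a freshly forked task now sits in TASK_WAKING between sched_fork() and wake_up_new_task(), so a stray signal or remote wakeup cannot enqueue it early. A rough userspace analogy (a self-contained sketch under heavy simplification, not kernel code):

    #include <assert.h>
    #include <stdio.h>

    enum state { RUNNING, WAKING, INTERRUPTIBLE };
    struct task { enum state state; int on_rq; };

    /* mimics try_to_wake_up(): only acts on tasks in the requested state */
    static int try_to_wake_up(struct task *t, enum state wanted)
    {
            if (t->state != wanted)
                    return 0;               /* a WAKING task is left alone */
            t->state = RUNNING;
            t->on_rq = 1;
            return 1;
    }

    /* mimics wake_up_new_task(): the one legitimate first wakeup */
    static void wake_up_new_task(struct task *t)
    {
            assert(t->state == WAKING);     /* parallels the BUG_ON() above */
            t->state = RUNNING;
            t->on_rq = 1;
    }

    int main(void)
    {
            struct task child = { WAKING, 0 };

            /* a stray wakeup before the fork wakeup has no effect */
            printf("stray wakeup honoured: %d\n",
                   try_to_wake_up(&child, INTERRUPTIBLE));
            wake_up_new_task(&child);
            printf("running: %d, queued: %d\n",
                   child.state == RUNNING, child.on_rq);
            return 0;
    }
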
@@ -2781,10 +2784,10 @@ static inline void post_schedule(struct rq *rq)
        if (rq->post_schedule) {
                unsigned long flags;
 
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->curr->sched_class->post_schedule)
                        rq->curr->sched_class->post_schedule(rq);
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
 
                rq->post_schedule = 0;
        }
@@ -3066,15 +3069,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 {
        BUG_ON(!irqs_disabled());
        if (rq1 == rq2) {
-               spin_lock(&rq1->lock);
+               raw_spin_lock(&rq1->lock);
                __acquire(rq2->lock);   /* Fake it out ;) */
        } else {
                if (rq1 < rq2) {
-                       spin_lock(&rq1->lock);
-                       spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock(&rq1->lock);
+                       raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
                } else {
-                       spin_lock(&rq2->lock);
-                       spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
+                       raw_spin_lock(&rq2->lock);
+                       raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
                }
        }
        update_rq_clock(rq1);
@@ -3091,9 +3094,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
        __releases(rq1->lock)
        __releases(rq2->lock)
 {
-       spin_unlock(&rq1->lock);
+       raw_spin_unlock(&rq1->lock);
        if (rq1 != rq2)
-               spin_unlock(&rq2->lock);
+               raw_spin_unlock(&rq2->lock);
        else
                __release(rq2->lock);
 }
@@ -4186,14 +4189,15 @@ redo:
 
                if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
 
-                       spin_lock_irqsave(&busiest->lock, flags);
+                       raw_spin_lock_irqsave(&busiest->lock, flags);
 
                        /* don't kick the migration_thread, if the curr
                         * task on busiest cpu can't be moved to this_cpu
                         */
                        if (!cpumask_test_cpu(this_cpu,
                                              &busiest->curr->cpus_allowed)) {
-                               spin_unlock_irqrestore(&busiest->lock, flags);
+                               raw_spin_unlock_irqrestore(&busiest->lock,
+                                                           flags);
                                all_pinned = 1;
                                goto out_one_pinned;
                        }
@@ -4203,7 +4207,7 @@ redo:
                                busiest->push_cpu = this_cpu;
                                active_balance = 1;
                        }
-                       spin_unlock_irqrestore(&busiest->lock, flags);
+                       raw_spin_unlock_irqrestore(&busiest->lock, flags);
                        if (active_balance)
                                wake_up_process(busiest->migration_thread);
 
@@ -4385,10 +4389,10 @@ redo:
                /*
                 * Should not call ttwu while holding a rq->lock
                 */
-               spin_unlock(&this_rq->lock);
+               raw_spin_unlock(&this_rq->lock);
                if (active_balance)
                        wake_up_process(busiest->migration_thread);
-               spin_lock(&this_rq->lock);
+               raw_spin_lock(&this_rq->lock);
 
        } else
                sd->nr_balance_failed = 0;
@@ -5257,11 +5261,11 @@ void scheduler_tick(void)
 
        sched_clock_tick();
 
-       spin_lock(&rq->lock);
+       raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
        update_cpu_load(rq);
        curr->sched_class->task_tick(rq, curr, 0);
-       spin_unlock(&rq->lock);
+       raw_spin_unlock(&rq->lock);
 
        perf_event_task_tick(curr, cpu);
 
@@ -5337,8 +5341,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
 {
        struct pt_regs *regs = get_irq_regs();
 
-       printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
-               prev->comm, prev->pid, preempt_count());
+       pr_err("BUG: scheduling while atomic: %s/%d/0x%08x\n",
+              prev->comm, prev->pid, preempt_count());
 
        debug_show_held_locks(prev);
        print_modules();
@@ -5455,7 +5459,7 @@ need_resched_nonpreemptible:
        if (sched_feat(HRTICK))
                hrtick_clear(rq);
 
-       spin_lock_irq(&rq->lock);
+       raw_spin_lock_irq(&rq->lock);
        update_rq_clock(rq);
        clear_tsk_need_resched(prev);
 
@@ -5491,7 +5495,7 @@ need_resched_nonpreemptible:
                cpu = smp_processor_id();
                rq = cpu_rq(cpu);
        } else
-               spin_unlock_irq(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
 
        post_schedule(rq);
 
@@ -5908,14 +5912,15 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  */
 bool try_wait_for_completion(struct completion *x)
 {
+       unsigned long flags;
        int ret = 1;
 
-       spin_lock_irq(&x->wait.lock);
+       spin_lock_irqsave(&x->wait.lock, flags);
        if (!x->done)
                ret = 0;
        else
                x->done--;
-       spin_unlock_irq(&x->wait.lock);
+       spin_unlock_irqrestore(&x->wait.lock, flags);
        return ret;
 }
 EXPORT_SYMBOL(try_wait_for_completion);
@@ -5930,12 +5935,13 @@ EXPORT_SYMBOL(try_wait_for_completion);
  */
 bool completion_done(struct completion *x)
 {
+       unsigned long flags;
        int ret = 1;
 
-       spin_lock_irq(&x->wait.lock);
+       spin_lock_irqsave(&x->wait.lock, flags);
        if (!x->done)
                ret = 0;
-       spin_unlock_irq(&x->wait.lock);
+       spin_unlock_irqrestore(&x->wait.lock, flags);
        return ret;
 }
 EXPORT_SYMBOL(completion_done);
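
Both completion helpers above switch from spin_lock_irq() to spin_lock_irqsave(), so they no longer unconditionally re-enable interrupts and can be called from contexts that already run with IRQs off. A sketch of such a caller (hypothetical function, kernel context assumed):

    static bool poll_done_with_irqs_off(struct completion *x)
    {
            unsigned long flags;
            bool done;

            local_irq_save(flags);
            done = completion_done(x);      /* caller's IRQ state is preserved now */
            local_irq_restore(flags);
            return done;
    }
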
@@ -6320,7 +6326,7 @@ recheck:
         * make sure no PI-waiters arrive (or leave) while we are
         * changing the priority of the task:
         */
-       spin_lock_irqsave(&p->pi_lock, flags);
+       raw_spin_lock_irqsave(&p->pi_lock, flags);
        /*
         * To be able to change p->policy safely, the apropriate
         * runqueue lock must be held.
@@ -6330,7 +6336,7 @@ recheck:
        if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
                policy = oldpolicy = -1;
                __task_rq_unlock(rq);
-               spin_unlock_irqrestore(&p->pi_lock, flags);
+               raw_spin_unlock_irqrestore(&p->pi_lock, flags);
                goto recheck;
        }
        update_rq_clock(rq);
@@ -6354,7 +6360,7 @@ recheck:
                check_class_changed(rq, p, prev_class, oldprio, running);
        }
        __task_rq_unlock(rq);
-       spin_unlock_irqrestore(&p->pi_lock, flags);
+       raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
        rt_mutex_adjust_pi(p);
 
@@ -6454,7 +6460,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
                return -EINVAL;
 
        retval = -ESRCH;
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        p = find_process_by_pid(pid);
        if (p) {
                retval = security_task_getscheduler(p);
@@ -6462,7 +6468,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
                        retval = p->policy
                                | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
        }
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        return retval;
 }
 
@@ -6480,7 +6486,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
        if (!param || pid < 0)
                return -EINVAL;
 
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        p = find_process_by_pid(pid);
        retval = -ESRCH;
        if (!p)
@@ -6491,7 +6497,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
                goto out_unlock;
 
        lp.sched_priority = p->rt_priority;
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        /*
         * This one might sleep, we cannot do it with a spinlock held ...
@@ -6501,7 +6507,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
        return retval;
 
 out_unlock:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        return retval;
 }
 
@@ -6512,22 +6518,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
        int retval;
 
        get_online_cpus();
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
 
        p = find_process_by_pid(pid);
        if (!p) {
-               read_unlock(&tasklist_lock);
+               rcu_read_unlock();
                put_online_cpus();
                return -ESRCH;
        }
 
-       /*
-        * It is not safe to call set_cpus_allowed with the
-        * tasklist_lock held. We will bump the task_struct's
-        * usage count and then drop tasklist_lock.
-        */
+       /* Prevent p going away */
        get_task_struct(p);
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
                retval = -ENOMEM;
@@ -6613,7 +6615,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
        int retval;
 
        get_online_cpus();
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
 
        retval = -ESRCH;
        p = find_process_by_pid(pid);
@@ -6629,7 +6631,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
        task_rq_unlock(rq, &flags);
 
 out_unlock:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        put_online_cpus();
 
        return retval;
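
The read_lock(&tasklist_lock) -> rcu_read_lock() conversions in these sched_*() syscalls all use the same lookup shape: an RCU read-side section is enough to find the task, and a reference is taken only when the task must outlive that section (as sched_setaffinity() does above). A generic sketch of the idiom (find_task_by_vpid() used as an assumed stand-in for this file's find_process_by_pid()):

    struct task_struct *p;

    rcu_read_lock();
    p = find_task_by_vpid(pid);     /* RCU-protected pid lookup */
    if (p)
            get_task_struct(p);     /* pin p beyond the RCU section */
    rcu_read_unlock();

    if (!p)
            return -ESRCH;
    /* ... operate on p ... */
    put_task_struct(p);
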
@@ -6684,7 +6686,7 @@ SYSCALL_DEFINE0(sched_yield)
         */
        __release(rq->lock);
        spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-       _raw_spin_unlock(&rq->lock);
+       do_raw_spin_unlock(&rq->lock);
        preempt_enable_no_resched();
 
        schedule();
@@ -6873,7 +6875,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
                return -EINVAL;
 
        retval = -ESRCH;
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        p = find_process_by_pid(pid);
        if (!p)
                goto out_unlock;
@@ -6886,13 +6888,13 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
        time_slice = p->sched_class->get_rr_interval(rq, p);
        task_rq_unlock(rq, &flags);
 
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        jiffies_to_timespec(time_slice, &t);
        retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
        return retval;
 
 out_unlock:
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
        return retval;
 }
 
@@ -6904,23 +6906,23 @@ void sched_show_task(struct task_struct *p)
        unsigned state;
 
        state = p->state ? __ffs(p->state) + 1 : 0;
-       printk(KERN_INFO "%-13.13s %c", p->comm,
+       pr_info("%-13.13s %c", p->comm,
                state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
        if (state == TASK_RUNNING)
-               printk(KERN_CONT " running  ");
+               pr_cont(" running  ");
        else
-               printk(KERN_CONT " %08lx ", thread_saved_pc(p));
+               pr_cont(" %08lx ", thread_saved_pc(p));
 #else
        if (state == TASK_RUNNING)
-               printk(KERN_CONT "  running task    ");
+               pr_cont("  running task    ");
        else
-               printk(KERN_CONT " %016lx ", thread_saved_pc(p));
+               pr_cont(" %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
        free = stack_not_used(p);
 #endif
-       printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
+       pr_cont("%5lu %5d %6d 0x%08lx\n", free,
                task_pid_nr(p), task_pid_nr(p->real_parent),
                (unsigned long)task_thread_info(p)->flags);
 
@@ -6932,11 +6934,9 @@ void show_state_filter(unsigned long state_filter)
        struct task_struct *g, *p;
 
 #if BITS_PER_LONG == 32
-       printk(KERN_INFO
-               "  task                PC stack   pid father\n");
+       pr_info("  task                PC stack   pid father\n");
 #else
-       printk(KERN_INFO
-               "  task                        PC stack   pid father\n");
+       pr_info("  task                        PC stack   pid father\n");
 #endif
        read_lock(&tasklist_lock);
        do_each_thread(g, p) {
@@ -6980,9 +6980,10 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
 
        __sched_fork(idle);
+       idle->state = TASK_RUNNING;
        idle->se.exec_start = sched_clock();
 
        cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
@@ -6992,7 +6993,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
        idle->oncpu = 1;
 #endif
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 
        /* Set the preempt count _outside_ the spinlocks! */
 #if defined(CONFIG_PREEMPT)
@@ -7209,10 +7210,10 @@ static int migration_thread(void *data)
                struct migration_req *req;
                struct list_head *head;
 
-               spin_lock_irq(&rq->lock);
+               raw_spin_lock_irq(&rq->lock);
 
                if (cpu_is_offline(cpu)) {
-                       spin_unlock_irq(&rq->lock);
+                       raw_spin_unlock_irq(&rq->lock);
                        break;
                }
 
@@ -7224,7 +7225,7 @@ static int migration_thread(void *data)
                head = &rq->migration_queue;
 
                if (list_empty(head)) {
-                       spin_unlock_irq(&rq->lock);
+                       raw_spin_unlock_irq(&rq->lock);
                        schedule();
                        set_current_state(TASK_INTERRUPTIBLE);
                        continue;
@@ -7233,14 +7234,14 @@ static int migration_thread(void *data)
                list_del_init(head->next);
 
                if (req->task != NULL) {
-                       spin_unlock(&rq->lock);
+                       raw_spin_unlock(&rq->lock);
                        __migrate_task(req->task, cpu, req->dest_cpu);
                } else if (likely(cpu == (badcpu = smp_processor_id()))) {
                        req->dest_cpu = RCU_MIGRATION_GOT_QS;
-                       spin_unlock(&rq->lock);
+                       raw_spin_unlock(&rq->lock);
                } else {
                        req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
-                       spin_unlock(&rq->lock);
+                       raw_spin_unlock(&rq->lock);
                        WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
                }
                local_irq_enable();
@@ -7294,9 +7295,8 @@ again:
                 * leave kernel.
                 */
                if (p->mm && printk_ratelimit()) {
-                       printk(KERN_INFO "process %d (%s) no "
-                              "longer affine to cpu%d\n",
-                              task_pid_nr(p), p->comm, dead_cpu);
+                       pr_info("process %d (%s) no longer affine to cpu%d\n",
+                               task_pid_nr(p), p->comm, dead_cpu);
                }
        }
 
@@ -7363,14 +7363,14 @@ void sched_idle_next(void)
         * Strictly not necessary since rest of the CPUs are stopped by now
         * and interrupts disabled on the current cpu.
         */
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
 
        __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
 
        update_rq_clock(rq);
        activate_task(rq, p, 0);
 
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 /*
@@ -7406,9 +7406,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
         * that's OK. No task can be added to this CPU, so iteration is
         * fine.
         */
-       spin_unlock_irq(&rq->lock);
+       raw_spin_unlock_irq(&rq->lock);
        move_task_off_dead_cpu(dead_cpu, p);
-       spin_lock_irq(&rq->lock);
+       raw_spin_lock_irq(&rq->lock);
 
        put_task_struct(p);
 }
@@ -7674,13 +7674,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 
                /* Update our root-domain */
                rq = cpu_rq(cpu);
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
                        set_rq_online(rq);
                }
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -7705,13 +7705,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                put_task_struct(rq->migration_thread);
                rq->migration_thread = NULL;
                /* Idle task back to normal (off runqueue, low prio) */
-               spin_lock_irq(&rq->lock);
+               raw_spin_lock_irq(&rq->lock);
                update_rq_clock(rq);
                deactivate_task(rq, rq->idle, 0);
                __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
                rq->idle->sched_class = &idle_sched_class;
                migrate_dead_tasks(cpu);
-               spin_unlock_irq(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
                cpuset_unlock();
                migrate_nr_uninterruptible(rq);
                BUG_ON(rq->nr_running != 0);
@@ -7721,30 +7721,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 * they didn't take sched_hotcpu_mutex. Just wake up
                 * the requestors.
                 */
-               spin_lock_irq(&rq->lock);
+               raw_spin_lock_irq(&rq->lock);
                while (!list_empty(&rq->migration_queue)) {
                        struct migration_req *req;
 
                        req = list_entry(rq->migration_queue.next,
                                         struct migration_req, list);
                        list_del_init(&req->list);
-                       spin_unlock_irq(&rq->lock);
+                       raw_spin_unlock_irq(&rq->lock);
                        complete(&req->done);
-                       spin_lock_irq(&rq->lock);
+                       raw_spin_lock_irq(&rq->lock);
                }
-               spin_unlock_irq(&rq->lock);
+               raw_spin_unlock_irq(&rq->lock);
                break;
 
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                /* Update our root-domain */
                rq = cpu_rq(cpu);
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (rq->rd) {
                        BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
                        set_rq_offline(rq);
                }
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                break;
 #endif
        }
@@ -7803,48 +7803,44 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
        printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
 
        if (!(sd->flags & SD_LOAD_BALANCE)) {
-               printk("does not load-balance\n");
+               pr_cont("does not load-balance\n");
                if (sd->parent)
-                       printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
-                                       " has parent");
+                       pr_err("ERROR: !SD_LOAD_BALANCE domain has parent\n");
                return -1;
        }
 
-       printk(KERN_CONT "span %s level %s\n", str, sd->name);
+       pr_cont("span %s level %s\n", str, sd->name);
 
        if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-               printk(KERN_ERR "ERROR: domain->span does not contain "
-                               "CPU%d\n", cpu);
+               pr_err("ERROR: domain->span does not contain CPU%d\n", cpu);
        }
        if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
-               printk(KERN_ERR "ERROR: domain->groups does not contain"
-                               " CPU%d\n", cpu);
+               pr_err("ERROR: domain->groups does not contain CPU%d\n", cpu);
        }
 
        printk(KERN_DEBUG "%*s groups:", level + 1, "");
        do {
                if (!group) {
-                       printk("\n");
-                       printk(KERN_ERR "ERROR: group is NULL\n");
+                       pr_cont("\n");
+                       pr_err("ERROR: group is NULL\n");
                        break;
                }
 
                if (!group->cpu_power) {
-                       printk(KERN_CONT "\n");
-                       printk(KERN_ERR "ERROR: domain->cpu_power not "
-                                       "set\n");
+                       pr_cont("\n");
+                       pr_err("ERROR: domain->cpu_power not set\n");
                        break;
                }
 
                if (!cpumask_weight(sched_group_cpus(group))) {
-                       printk(KERN_CONT "\n");
-                       printk(KERN_ERR "ERROR: empty group\n");
+                       pr_cont("\n");
+                       pr_err("ERROR: empty group\n");
                        break;
                }
 
                if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
-                       printk(KERN_CONT "\n");
-                       printk(KERN_ERR "ERROR: repeated CPUs\n");
+                       pr_cont("\n");
+                       pr_err("ERROR: repeated CPUs\n");
                        break;
                }
 
@@ -7852,23 +7848,21 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
                cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
-               printk(KERN_CONT " %s", str);
+               pr_cont(" %s", str);
                if (group->cpu_power != SCHED_LOAD_SCALE) {
-                       printk(KERN_CONT " (cpu_power = %d)",
-                               group->cpu_power);
+                       pr_cont(" (cpu_power = %d)", group->cpu_power);
                }
 
                group = group->next;
        } while (group != sd->groups);
-       printk(KERN_CONT "\n");
+       pr_cont("\n");
 
        if (!cpumask_equal(sched_domain_span(sd), groupmask))
-               printk(KERN_ERR "ERROR: groups don't span domain->span\n");
+               pr_err("ERROR: groups don't span domain->span\n");
 
        if (sd->parent &&
            !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
-               printk(KERN_ERR "ERROR: parent span is not a superset "
-                       "of domain->span\n");
+               pr_err("ERROR: parent span is not a superset of domain->span\n");
        return 0;
 }
 
@@ -7974,7 +7968,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
        struct root_domain *old_rd = NULL;
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
 
        if (rq->rd) {
                old_rd = rq->rd;
@@ -8000,7 +7994,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
        if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
                set_rq_online(rq);
 
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 
        if (old_rd)
                free_rootdomain(old_rd);
@@ -8424,8 +8418,7 @@ static int build_numa_sched_groups(struct s_data *d,
        sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
                          GFP_KERNEL, num);
        if (!sg) {
-               printk(KERN_WARNING "Can not alloc domain group for node %d\n",
-                      num);
+               pr_warning("Can not alloc domain group for node %d\n", num);
                return -ENOMEM;
        }
        d->sched_group_nodes[num] = sg;
@@ -8454,8 +8447,8 @@ static int build_numa_sched_groups(struct s_data *d,
                sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
                                  GFP_KERNEL, num);
                if (!sg) {
-                       printk(KERN_WARNING
-                              "Can not alloc domain group for node %d\n", j);
+                       pr_warning("Can not alloc domain group for node %d\n",
+                                  j);
                        return -ENOMEM;
                }
                sg->cpu_power = 0;
@@ -8683,7 +8676,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
        d->sched_group_nodes = kcalloc(nr_node_ids,
                                      sizeof(struct sched_group *), GFP_KERNEL);
        if (!d->sched_group_nodes) {
-               printk(KERN_WARNING "Can not alloc sched group node list\n");
+               pr_warning("Can not alloc sched group node list\n");
                return sa_notcovered;
        }
        sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
@@ -8700,7 +8693,7 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
                return sa_send_covered;
        d->rd = alloc_rootdomain();
        if (!d->rd) {
-               printk(KERN_WARNING "Cannot alloc root domain\n");
+               pr_warning("Cannot alloc root domain\n");
                return sa_tmpmask;
        }
        return sa_rootdomain;
@@ -9357,13 +9350,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #ifdef CONFIG_SMP
        rt_rq->rt_nr_migratory = 0;
        rt_rq->overloaded = 0;
-       plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
+       plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
 #endif
 
        rt_rq->rt_time = 0;
        rt_rq->rt_throttled = 0;
        rt_rq->rt_runtime = 0;
-       spin_lock_init(&rt_rq->rt_runtime_lock);
+       raw_spin_lock_init(&rt_rq->rt_runtime_lock);
 
 #ifdef CONFIG_RT_GROUP_SCHED
        rt_rq->rt_nr_boosted = 0;
@@ -9523,7 +9516,7 @@ void __init sched_init(void)
                struct rq *rq;
 
                rq = cpu_rq(i);
-               spin_lock_init(&rq->lock);
+               raw_spin_lock_init(&rq->lock);
                rq->nr_running = 0;
                rq->calc_load_active = 0;
                rq->calc_load_update = jiffies + LOAD_FREQ;
@@ -9621,7 +9614,7 @@ void __init sched_init(void)
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
-       plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
+       plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
 #endif
 
        /*
@@ -9682,13 +9675,11 @@ void __might_sleep(char *file, int line, int preempt_offset)
                return;
        prev_jiffy = jiffies;
 
-       printk(KERN_ERR
-               "BUG: sleeping function called from invalid context at %s:%d\n",
-                       file, line);
-       printk(KERN_ERR
-               "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
-                       in_atomic(), irqs_disabled(),
-                       current->pid, current->comm);
+       pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
+              file, line);
+       pr_err("in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+              in_atomic(), irqs_disabled(),
+              current->pid, current->comm);
 
        debug_show_held_locks(current);
        if (irqs_disabled())
@@ -9746,13 +9737,13 @@ void normalize_rt_tasks(void)
                        continue;
                }
 
-               spin_lock(&p->pi_lock);
+               raw_spin_lock(&p->pi_lock);
                rq = __task_rq_lock(p);
 
                normalize_task(rq, p);
 
                __task_rq_unlock(rq);
-               spin_unlock(&p->pi_lock);
+               raw_spin_unlock(&p->pi_lock);
        } while_each_thread(g, p);
 
        read_unlock_irqrestore(&tasklist_lock, flags);
@@ -10115,9 +10106,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
        struct rq *rq = cfs_rq->rq;
        unsigned long flags;
 
-       spin_lock_irqsave(&rq->lock, flags);
+       raw_spin_lock_irqsave(&rq->lock, flags);
        __set_se_shares(se, shares);
-       spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 
 static DEFINE_MUTEX(shares_mutex);
@@ -10302,18 +10293,18 @@ static int tg_set_bandwidth(struct task_group *tg,
        if (err)
                goto unlock;
 
-       spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+       raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
        tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
        tg->rt_bandwidth.rt_runtime = rt_runtime;
 
        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = tg->rt_rq[i];
 
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = rt_runtime;
-               spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
-       spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
+       raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
  unlock:
        read_unlock(&tasklist_lock);
        mutex_unlock(&rt_constraints_mutex);
@@ -10418,15 +10409,15 @@ static int sched_rt_global_constraints(void)
        if (sysctl_sched_rt_runtime == 0)
                return -EBUSY;
 
-       spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+       raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
        for_each_possible_cpu(i) {
                struct rt_rq *rt_rq = &cpu_rq(i)->rt;
 
-               spin_lock(&rt_rq->rt_runtime_lock);
+               raw_spin_lock(&rt_rq->rt_runtime_lock);
                rt_rq->rt_runtime = global_rt_runtime();
-               spin_unlock(&rt_rq->rt_runtime_lock);
+               raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
-       spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
+       raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
 
        return 0;
 }
@@ -10717,9 +10708,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
        /*
         * Take rq->lock to make 64-bit read safe on 32-bit platforms.
         */
-       spin_lock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        data = *cpuusage;
-       spin_unlock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 #else
        data = *cpuusage;
 #endif
@@ -10735,9 +10726,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
        /*
         * Take rq->lock to make 64-bit write safe on 32-bit platforms.
         */
-       spin_lock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_lock_irq(&cpu_rq(cpu)->lock);
        *cpuusage = val;
-       spin_unlock_irq(&cpu_rq(cpu)->lock);
+       raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 #else
        *cpuusage = val;
 #endif
@@ -10971,9 +10962,9 @@ void synchronize_sched_expedited(void)
                init_completion(&req->done);
                req->task = NULL;
                req->dest_cpu = RCU_MIGRATION_NEED_QS;
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                list_add(&req->list, &rq->migration_queue);
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
                wake_up_process(rq->migration_thread);
        }
        for_each_online_cpu(cpu) {
@@ -10981,11 +10972,11 @@ void synchronize_sched_expedited(void)
                req = &per_cpu(rcu_migration_req, cpu);
                rq = cpu_rq(cpu);
                wait_for_completion(&req->done);
-               spin_lock_irqsave(&rq->lock, flags);
+               raw_spin_lock_irqsave(&rq->lock, flags);
                if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
                        need_full_sync = 1;
                req->dest_cpu = RCU_MIGRATION_IDLE;
-               spin_unlock_irqrestore(&rq->lock, flags);
+               raw_spin_unlock_irqrestore(&rq->lock, flags);
        }
        rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
        synchronize_sched_expedited_count++;