sched: Weaken SD_POWERSAVINGS_BALANCE
diff --git a/kernel/sched.c b/kernel/sched.c
index 584a122b553c24b26c63d97dd100b62a11de2cfd..f0ccb8b926c806985b346a790f8965294abe78b4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -64,7 +64,6 @@
 #include <linux/tsacct_kern.h>
 #include <linux/kprobes.h>
 #include <linux/delayacct.h>
-#include <linux/reciprocal_div.h>
 #include <linux/unistd.h>
 #include <linux/pagemap.h>
 #include <linux/hrtimer.h>
  */
 #define RUNTIME_INF    ((u64)~0ULL)
 
-#ifdef CONFIG_SMP
-
-static void double_rq_lock(struct rq *rq1, struct rq *rq2);
-
-/*
- * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
- * Since cpu_power is a 'constant', we can use a reciprocal divide.
- */
-static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
-{
-       return reciprocal_divide(load, sg->reciprocal_cpu_power);
-}
-
-/*
- * Each time a sched group cpu_power is changed,
- * we must compute its reciprocal value
- */
-static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
-{
-       sg->__cpu_power += val;
-       sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
-}
-#endif
-
 static inline int rt_policy(int policy)
 {
        if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
@@ -537,14 +512,6 @@ struct root_domain {
 #ifdef CONFIG_SMP
        struct cpupri cpupri;
 #endif
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-       /*
-        * Preferred wake up cpu nominated by sched_mc balance that will be
-        * used when most cpus are idle in the system indicating overall very
-        * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2)
-        */
-       unsigned int sched_mc_preferred_wakeup_cpu;
-#endif
 };
 
 /*
@@ -627,6 +594,9 @@ struct rq {
 
        struct task_struct *migration_thread;
        struct list_head migration_queue;
+
+       u64 rt_avg;
+       u64 age_stamp;
 #endif
 
        /* calc_load related fields */
@@ -862,6 +832,14 @@ unsigned int sysctl_sched_shares_ratelimit = 250000;
  */
 unsigned int sysctl_sched_shares_thresh = 4;
 
+/*
+ * period over which we average the RT time consumption, measured
+ * in ms.
+ *
+ * default: 1s
+ */
+const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
+
 /*
  * period over which we measure -rt task cpu usage in us.
  * default: 1s
@@ -1280,12 +1258,37 @@ void wake_up_idle_cpu(int cpu)
 }
 #endif /* CONFIG_NO_HZ */
 
+static u64 sched_avg_period(void)
+{
+       return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
+}
+
+static void sched_avg_update(struct rq *rq)
+{
+       s64 period = sched_avg_period();
+
+       while ((s64)(rq->clock - rq->age_stamp) > period) {
+               rq->age_stamp += period;
+               rq->rt_avg /= 2;
+       }
+}
+
+static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
+{
+       rq->rt_avg += rt_delta;
+       sched_avg_update(rq);
+}
+
 #else /* !CONFIG_SMP */
 static void resched_task(struct task_struct *p)
 {
        assert_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
 }
+
+static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
+{
+}
 #endif /* CONFIG_SMP */
 
 #if BITS_PER_LONG == 32
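
The rt_avg/age_stamp pair added above implements a crude exponentially decaying average of real-time CPU consumption: every half-period (sched_avg_period() returns half of sysctl_sched_time_avg, in nanoseconds) the accumulated value is halved. A minimal user-space sketch of the same decay, with made-up nanosecond inputs (the toy struct and the workload numbers are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

#define MSEC_PER_SEC   1000ULL
#define NSEC_PER_MSEC  1000000ULL

/* toy stand-ins for the rq fields touched by the patch */
struct toy_rq {
	uint64_t clock;      /* current time in ns */
	uint64_t age_stamp;  /* start of the current averaging window */
	uint64_t rt_avg;     /* decayed RT time in ns */
};

static uint64_t sched_avg_period(void)
{
	/* half of sysctl_sched_time_avg (1s default), in ns */
	return MSEC_PER_SEC * NSEC_PER_MSEC / 2;
}

static void sched_avg_update(struct toy_rq *rq)
{
	int64_t period = sched_avg_period();

	while ((int64_t)(rq->clock - rq->age_stamp) > period) {
		rq->age_stamp += period;
		rq->rt_avg /= 2;	/* halve once per elapsed half-period */
	}
}

static void sched_rt_avg_update(struct toy_rq *rq, uint64_t rt_delta)
{
	rq->rt_avg += rt_delta;
	sched_avg_update(rq);
}

int main(void)
{
	struct toy_rq rq = { .clock = 0, .age_stamp = 0, .rt_avg = 0 };

	/* pretend 100ms of RT execution lands every 250ms of wall clock */
	for (int i = 1; i <= 8; i++) {
		rq.clock = i * 250 * NSEC_PER_MSEC;
		sched_rt_avg_update(&rq, 100 * NSEC_PER_MSEC);
		printf("t=%4dms rt_avg=%llums\n", i * 250,
		       (unsigned long long)(rq.rt_avg / NSEC_PER_MSEC));
	}
	return 0;
}

The running value converges instead of growing without bound, which is what scale_rt_power() later relies on when it discounts a cpu's capacity.
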
@@ -1496,8 +1499,65 @@ static int tg_nop(struct task_group *tg, void *data)
 #endif
 
 #ifdef CONFIG_SMP
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
+/* Used instead of source_load when we know the type == 0 */
+static unsigned long weighted_cpuload(const int cpu)
+{
+       return cpu_rq(cpu)->load.weight;
+}
+
+/*
+ * Return a low guess at the load of a migration-source cpu weighted
+ * according to the scheduling class and "nice" value.
+ *
+ * We want to under-estimate the load of migration sources, to
+ * balance conservatively.
+ */
+static unsigned long source_load(int cpu, int type)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long total = weighted_cpuload(cpu);
+
+       if (type == 0 || !sched_feat(LB_BIAS))
+               return total;
+
+       return min(rq->cpu_load[type-1], total);
+}
+
+/*
+ * Return a high guess at the load of a migration-target cpu weighted
+ * according to the scheduling class and "nice" value.
+ */
+static unsigned long target_load(int cpu, int type)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long total = weighted_cpuload(cpu);
+
+       if (type == 0 || !sched_feat(LB_BIAS))
+               return total;
+
+       return max(rq->cpu_load[type-1], total);
+}
+
+static struct sched_group *group_of(int cpu)
+{
+       struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+       if (!sd)
+               return NULL;
+
+       return sd->groups;
+}
+
+static unsigned long power_of(int cpu)
+{
+       struct sched_group *group = group_of(cpu);
+
+       if (!group)
+               return SCHED_LOAD_SCALE;
+
+       return group->cpu_power;
+}
+
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 static unsigned long cpu_avg_load_per_task(int cpu)
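
As their comments say, source_load() and target_load() deliberately under- and over-estimate: with LB_BIAS enabled they take the min (respectively max) of the decayed cpu_load history and the instantaneous weighted load, so balancing errs on the side of not moving work. A tiny stand-alone illustration with made-up load figures (not kernel code):

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

int main(void)
{
	unsigned long cpu_load_hist = 2048;	/* decayed history, cpu_load[type-1] */
	unsigned long now = 1024;		/* instantaneous weighted_cpuload()  */

	/* low guess when the cpu is a migration source ... */
	printf("source_load = %lu\n", min_ul(cpu_load_hist, now));	/* 1024 */
	/* ... high guess when it is a migration target */
	printf("target_load = %lu\n", max_ul(cpu_load_hist, now));	/* 2048 */
	return 0;
}
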
@@ -1682,6 +1742,8 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #ifdef CONFIG_PREEMPT
 
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * fair double_lock_balance: Safely acquires both rq->locks in a fair
  * way at the expense of forcing extra atomic operations in all
@@ -1946,13 +2008,6 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 }
 
 #ifdef CONFIG_SMP
-
-/* Used instead of source_load when we know the type == 0 */
-static unsigned long weighted_cpuload(const int cpu)
-{
-       return cpu_rq(cpu)->load.weight;
-}
-
 /*
  * Is this task likely cache-hot:
  */
@@ -2226,186 +2281,6 @@ void kick_process(struct task_struct *p)
        preempt_enable();
 }
 EXPORT_SYMBOL_GPL(kick_process);
-
-/*
- * Return a low guess at the load of a migration-source cpu weighted
- * according to the scheduling class and "nice" value.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static unsigned long source_load(int cpu, int type)
-{
-       struct rq *rq = cpu_rq(cpu);
-       unsigned long total = weighted_cpuload(cpu);
-
-       if (type == 0 || !sched_feat(LB_BIAS))
-               return total;
-
-       return min(rq->cpu_load[type-1], total);
-}
-
-/*
- * Return a high guess at the load of a migration-target cpu weighted
- * according to the scheduling class and "nice" value.
- */
-static unsigned long target_load(int cpu, int type)
-{
-       struct rq *rq = cpu_rq(cpu);
-       unsigned long total = weighted_cpuload(cpu);
-
-       if (type == 0 || !sched_feat(LB_BIAS))
-               return total;
-
-       return max(rq->cpu_load[type-1], total);
-}
-
-/*
- * find_idlest_group finds and returns the least busy CPU group within the
- * domain.
- */
-static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
-{
-       struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
-       unsigned long min_load = ULONG_MAX, this_load = 0;
-       int load_idx = sd->forkexec_idx;
-       int imbalance = 100 + (sd->imbalance_pct-100)/2;
-
-       do {
-               unsigned long load, avg_load;
-               int local_group;
-               int i;
-
-               /* Skip over this group if it has no CPUs allowed */
-               if (!cpumask_intersects(sched_group_cpus(group),
-                                       &p->cpus_allowed))
-                       continue;
-
-               local_group = cpumask_test_cpu(this_cpu,
-                                              sched_group_cpus(group));
-
-               /* Tally up the load of all CPUs in the group */
-               avg_load = 0;
-
-               for_each_cpu(i, sched_group_cpus(group)) {
-                       /* Bias balancing toward cpus of our domain */
-                       if (local_group)
-                               load = source_load(i, load_idx);
-                       else
-                               load = target_load(i, load_idx);
-
-                       avg_load += load;
-               }
-
-               /* Adjust by relative CPU power of the group */
-               avg_load = sg_div_cpu_power(group,
-                               avg_load * SCHED_LOAD_SCALE);
-
-               if (local_group) {
-                       this_load = avg_load;
-                       this = group;
-               } else if (avg_load < min_load) {
-                       min_load = avg_load;
-                       idlest = group;
-               }
-       } while (group = group->next, group != sd->groups);
-
-       if (!idlest || 100*this_load < imbalance*min_load)
-               return NULL;
-       return idlest;
-}
-
-/*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
- */
-static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
-{
-       unsigned long load, min_load = ULONG_MAX;
-       int idlest = -1;
-       int i;
-
-       /* Traverse only the allowed CPUs */
-       for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
-               load = weighted_cpuload(i);
-
-               if (load < min_load || (load == min_load && i == this_cpu)) {
-                       min_load = load;
-                       idlest = i;
-               }
-       }
-
-       return idlest;
-}
-
-/*
- * sched_balance_self: balance the current task (running on cpu) in domains
- * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
- * SD_BALANCE_EXEC.
- *
- * Balance, ie. select the least loaded group.
- *
- * Returns the target CPU number, or the same CPU if no balancing is needed.
- *
- * preempt must be disabled.
- */
-static int sched_balance_self(int cpu, int flag)
-{
-       struct task_struct *t = current;
-       struct sched_domain *tmp, *sd = NULL;
-
-       for_each_domain(cpu, tmp) {
-               /*
-                * If power savings logic is enabled for a domain, stop there.
-                */
-               if (tmp->flags & SD_POWERSAVINGS_BALANCE)
-                       break;
-               if (tmp->flags & flag)
-                       sd = tmp;
-       }
-
-       if (sd)
-               update_shares(sd);
-
-       while (sd) {
-               struct sched_group *group;
-               int new_cpu, weight;
-
-               if (!(sd->flags & flag)) {
-                       sd = sd->child;
-                       continue;
-               }
-
-               group = find_idlest_group(sd, t, cpu);
-               if (!group) {
-                       sd = sd->child;
-                       continue;
-               }
-
-               new_cpu = find_idlest_cpu(group, t, cpu);
-               if (new_cpu == -1 || new_cpu == cpu) {
-                       /* Now try balancing at a lower domain level of cpu */
-                       sd = sd->child;
-                       continue;
-               }
-
-               /* Now try balancing at a lower domain level of new_cpu */
-               cpu = new_cpu;
-               weight = cpumask_weight(sched_domain_span(sd));
-               sd = NULL;
-               for_each_domain(cpu, tmp) {
-                       if (weight <= cpumask_weight(sched_domain_span(tmp)))
-                               break;
-                       if (tmp->flags & flag)
-                               sd = tmp;
-               }
-               /* while loop will break here if sd == NULL */
-       }
-
-       return cpu;
-}
-
 #endif /* CONFIG_SMP */
 
 /**
@@ -2447,33 +2322,17 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 {
        int cpu, orig_cpu, this_cpu, success = 0;
        unsigned long flags;
-       long old_state;
        struct rq *rq;
 
        if (!sched_feat(SYNC_WAKEUPS))
                sync = 0;
 
-#ifdef CONFIG_SMP
-       if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
-               struct sched_domain *sd;
-
-               this_cpu = raw_smp_processor_id();
-               cpu = task_cpu(p);
-
-               for_each_domain(this_cpu, sd) {
-                       if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-                               update_shares(sd);
-                               break;
-                       }
-               }
-       }
-#endif
+       this_cpu = get_cpu();
 
        smp_wmb();
        rq = task_rq_lock(p, &flags);
        update_rq_clock(rq);
-       old_state = p->state;
-       if (!(old_state & state))
+       if (!(p->state & state))
                goto out;
 
        if (p->se.on_rq)
@@ -2481,27 +2340,25 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
        cpu = task_cpu(p);
        orig_cpu = cpu;
-       this_cpu = smp_processor_id();
 
 #ifdef CONFIG_SMP
        if (unlikely(task_running(rq, p)))
                goto out_activate;
 
-       cpu = p->sched_class->select_task_rq(p, sync);
-       if (cpu != orig_cpu) {
+       /*
+        * In order to handle concurrent wakeups and release the rq->lock
+        * we put the task in TASK_WAKING state.
+        */
+       p->state = TASK_WAKING;
+       task_rq_unlock(rq, &flags);
+
+       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, sync);
+       if (cpu != orig_cpu)
                set_task_cpu(p, cpu);
-               task_rq_unlock(rq, &flags);
-               /* might preempt at this point */
-               rq = task_rq_lock(p, &flags);
-               old_state = p->state;
-               if (!(old_state & state))
-                       goto out;
-               if (p->se.on_rq)
-                       goto out_running;
 
-               this_cpu = smp_processor_id();
-               cpu = task_cpu(p);
-       }
+       rq = task_rq_lock(p, &flags);
+       WARN_ON(p->state != TASK_WAKING);
+       cpu = task_cpu(p);
 
 #ifdef CONFIG_SCHEDSTATS
        schedstat_inc(rq, ttwu_count);
@@ -2559,6 +2416,7 @@ out_running:
 #endif
 out:
        task_rq_unlock(rq, &flags);
+       put_cpu();
 
        return success;
 }
@@ -2662,11 +2520,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
 
        __sched_fork(p);
 
-#ifdef CONFIG_SMP
-       cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
-#endif
-       set_task_cpu(p, cpu);
-
        /*
         * Make sure we do not leak PI boosting priority to the child.
         */
@@ -2697,6 +2550,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
        if (!rt_prio(p->prio))
                p->sched_class = &fair_sched_class;
 
+#ifdef CONFIG_SMP
+       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+#endif
+       set_task_cpu(p, cpu);
+
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        if (likely(sched_info_on()))
                memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -3251,7 +3109,7 @@ out:
 void sched_exec(void)
 {
        int new_cpu, this_cpu = get_cpu();
-       new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
+       new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
        put_cpu();
        if (new_cpu != this_cpu)
                sched_migrate_task(current, new_cpu);
@@ -3632,7 +3490,7 @@ static inline void update_sd_power_savings_stats(struct sched_group *group,
         * capacity but still has some space to pick up some load
         * from other group and save more power
         */
-       if (sgs->sum_nr_running > sgs->group_capacity - 1)
+       if (sgs->sum_nr_running + 1 > sgs->group_capacity)
                return;
 
        if (sgs->sum_nr_running > sds->leader_nr_running ||
@@ -3671,11 +3529,6 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
        *imbalance = sds->min_load_per_task;
        sds->busiest = sds->group_min;
 
-       if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
-               cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
-                       group_first_cpu(sds->group_leader);
-       }
-
        return 1;
 
 }
@@ -3699,6 +3552,77 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
+unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
+{
+       unsigned long weight = cpumask_weight(sched_domain_span(sd));
+       unsigned long smt_gain = sd->smt_gain;
+
+       smt_gain /= weight;
+
+       return smt_gain;
+}
+
+unsigned long scale_rt_power(int cpu)
+{
+       struct rq *rq = cpu_rq(cpu);
+       u64 total, available;
+
+       sched_avg_update(rq);
+
+       total = sched_avg_period() + (rq->clock - rq->age_stamp);
+       available = total - rq->rt_avg;
+
+       if (unlikely((s64)total < SCHED_LOAD_SCALE))
+               total = SCHED_LOAD_SCALE;
+
+       total >>= SCHED_LOAD_SHIFT;
+
+       return div_u64(available, total);
+}
+
+static void update_cpu_power(struct sched_domain *sd, int cpu)
+{
+       unsigned long weight = cpumask_weight(sched_domain_span(sd));
+       unsigned long power = SCHED_LOAD_SCALE;
+       struct sched_group *sdg = sd->groups;
+
+       /* here we could scale based on cpufreq */
+
+       if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+               power *= arch_scale_smt_power(sd, cpu);
+               power >>= SCHED_LOAD_SHIFT;
+       }
+
+       power *= scale_rt_power(cpu);
+       power >>= SCHED_LOAD_SHIFT;
+
+       if (!power)
+               power = 1;
+
+       sdg->cpu_power = power;
+}
+
+static void update_group_power(struct sched_domain *sd, int cpu)
+{
+       struct sched_domain *child = sd->child;
+       struct sched_group *group, *sdg = sd->groups;
+       unsigned long power;
+
+       if (!child) {
+               update_cpu_power(sd, cpu);
+               return;
+       }
+
+       power = 0;
+
+       group = child->groups;
+       do {
+               power += group->cpu_power;
+               group = group->next;
+       } while (group != child->groups);
+
+       sdg->cpu_power = power;
+}
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
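
Taken together, update_cpu_power() multiplies SCHED_LOAD_SCALE by two fixed-point factors: the SMT gain shared among siblings and the fraction of time left over after real-time work. A rough worked example, assuming SCHED_LOAD_SCALE = 1024, a default smt_gain of 1178 and two hardware threads (these defaults come from the topology code and are assumptions here, not part of this hunk):

#include <stdio.h>

#define SCHED_LOAD_SHIFT 10
#define SCHED_LOAD_SCALE (1UL << SCHED_LOAD_SHIFT)	/* 1024 */

int main(void)
{
	unsigned long power = SCHED_LOAD_SCALE;
	unsigned long smt_gain = 1178;	/* assumed per-core SMT gain */
	unsigned long weight = 2;	/* two SMT siblings */
	unsigned long rt_free = 768;	/* scale_rt_power(): ~75% of time left for CFS */

	/* arch_scale_smt_power(): gain shared between siblings */
	power *= smt_gain / weight;	/* 1024 * 589 */
	power >>= SCHED_LOAD_SHIFT;	/* -> 589 */

	/* scale_rt_power(): discount time eaten by RT tasks */
	power *= rt_free;		/* 589 * 768 */
	power >>= SCHED_LOAD_SHIFT;	/* -> 441 */

	printf("cpu_power = %lu (of %lu)\n", power, SCHED_LOAD_SCALE);
	return 0;
}

So a hyperthread that also runs RT tasks ends up advertising well under half of a nominal cpu's power, and update_group_power() simply sums these per-cpu values up the domain hierarchy.
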
@@ -3712,7 +3636,8 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
  * @balance: Should we balance.
  * @sgs: variable to hold the statistics for this group.
  */
-static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
+static inline void update_sg_lb_stats(struct sched_domain *sd,
+                       struct sched_group *group, int this_cpu,
                        enum cpu_idle_type idle, int load_idx, int *sd_idle,
                        int local_group, const struct cpumask *cpus,
                        int *balance, struct sg_lb_stats *sgs)
@@ -3723,8 +3648,11 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
        unsigned long sum_avg_load_per_task;
        unsigned long avg_load_per_task;
 
-       if (local_group)
+       if (local_group) {
                balance_cpu = group_first_cpu(group);
+               if (balance_cpu == this_cpu)
+                       update_group_power(sd, this_cpu);
+       }
 
        /* Tally up the load of all CPUs in the group */
        sum_avg_load_per_task = avg_load_per_task = 0;
@@ -3773,8 +3701,7 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
        }
 
        /* Adjust by relative CPU power of the group */
-       sgs->avg_load = sg_div_cpu_power(group,
-                       sgs->group_load * SCHED_LOAD_SCALE);
+       sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
 
 
        /*
@@ -3786,14 +3713,14 @@ static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
         *      normalized nr_running number somewhere that negates
         *      the hierarchy?
         */
-       avg_load_per_task = sg_div_cpu_power(group,
-                       sum_avg_load_per_task * SCHED_LOAD_SCALE);
+       avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
+               group->cpu_power;
 
        if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
                sgs->group_imb = 1;
 
-       sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
-
+       sgs->group_capacity =
+               DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
 }
 
 /**
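
The switch to DIV_ROUND_CLOSEST() matters once cpu_power can drop below SCHED_LOAD_SCALE: with flooring, a single SMT sibling whose power is, say, 589 would report a capacity of 0 and look permanently over capacity, while rounding to nearest keeps it at 1. A quick check of that arithmetic (the 589 figure is the assumed sibling power from the example above):

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL
/* same rounding as the kernel macro, for unsigned operands */
#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

int main(void)
{
	unsigned long smt_power = 589;

	printf("floor:   %lu\n", smt_power / SCHED_LOAD_SCALE);			/* 0 */
	printf("nearest: %lu\n", DIV_ROUND_CLOSEST(smt_power, SCHED_LOAD_SCALE));	/* 1 */
	return 0;
}
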
@@ -3811,9 +3738,13 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                        const struct cpumask *cpus, int *balance,
                        struct sd_lb_stats *sds)
 {
+       struct sched_domain *child = sd->child;
        struct sched_group *group = sd->groups;
        struct sg_lb_stats sgs;
-       int load_idx;
+       int load_idx, prefer_sibling = 0;
+
+       if (child && child->flags & SD_PREFER_SIBLING)
+               prefer_sibling = 1;
 
        init_sd_power_savings_stats(sd, sds, idle);
        load_idx = get_sd_load_idx(sd, idle);
@@ -3824,14 +3755,22 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                local_group = cpumask_test_cpu(this_cpu,
                                               sched_group_cpus(group));
                memset(&sgs, 0, sizeof(sgs));
-               update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
+               update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
                                local_group, cpus, balance, &sgs);
 
                if (local_group && balance && !(*balance))
                        return;
 
                sds->total_load += sgs.group_load;
-               sds->total_pwr += group->__cpu_power;
+               sds->total_pwr += group->cpu_power;
+
+               /*
+                * In case the child domain prefers tasks go to siblings
+                * first, lower the group capacity to one so that we'll try
+                * and move all the excess tasks away.
+                */
+               if (prefer_sibling)
+                       sgs.group_capacity = min(sgs.group_capacity, 1UL);
 
                if (local_group) {
                        sds->this_load = sgs.avg_load;
@@ -3851,7 +3790,6 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                update_sd_power_savings_stats(group, sds, local_group, &sgs);
                group = group->next;
        } while (group != sd->groups);
-
 }
 
 /**
@@ -3889,28 +3827,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
         * moving them.
         */
 
-       pwr_now += sds->busiest->__cpu_power *
+       pwr_now += sds->busiest->cpu_power *
                        min(sds->busiest_load_per_task, sds->max_load);
-       pwr_now += sds->this->__cpu_power *
+       pwr_now += sds->this->cpu_power *
                        min(sds->this_load_per_task, sds->this_load);
        pwr_now /= SCHED_LOAD_SCALE;
 
        /* Amount of load we'd subtract */
-       tmp = sg_div_cpu_power(sds->busiest,
-                       sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+       tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
+               sds->busiest->cpu_power;
        if (sds->max_load > tmp)
-               pwr_move += sds->busiest->__cpu_power *
+               pwr_move += sds->busiest->cpu_power *
                        min(sds->busiest_load_per_task, sds->max_load - tmp);
 
        /* Amount of load we'd add */
-       if (sds->max_load * sds->busiest->__cpu_power <
+       if (sds->max_load * sds->busiest->cpu_power <
                sds->busiest_load_per_task * SCHED_LOAD_SCALE)
-               tmp = sg_div_cpu_power(sds->this,
-                       sds->max_load * sds->busiest->__cpu_power);
+               tmp = (sds->max_load * sds->busiest->cpu_power) /
+                       sds->this->cpu_power;
        else
-               tmp = sg_div_cpu_power(sds->this,
-                       sds->busiest_load_per_task * SCHED_LOAD_SCALE);
-       pwr_move += sds->this->__cpu_power *
+               tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
+                       sds->this->cpu_power;
+       pwr_move += sds->this->cpu_power *
                        min(sds->this_load_per_task, sds->this_load + tmp);
        pwr_move /= SCHED_LOAD_SCALE;
 
@@ -3945,8 +3883,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
                        sds->max_load - sds->busiest_load_per_task);
 
        /* How much load to actually move to equalise the imbalance */
-       *imbalance = min(max_pull * sds->busiest->__cpu_power,
-               (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
+       *imbalance = min(max_pull * sds->busiest->cpu_power,
+               (sds->avg_load - sds->this_load) * sds->this->cpu_power)
                        / SCHED_LOAD_SCALE;
 
        /*
@@ -4076,15 +4014,18 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
        int i;
 
        for_each_cpu(i, sched_group_cpus(group)) {
+               unsigned long power = power_of(i);
+               unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
                unsigned long wl;
 
                if (!cpumask_test_cpu(i, cpus))
                        continue;
 
                rq = cpu_rq(i);
-               wl = weighted_cpuload(i);
+               wl = weighted_cpuload(i) * SCHED_LOAD_SCALE;
+               wl /= power;
 
-               if (rq->nr_running == 1 && wl > imbalance)
+               if (capacity && rq->nr_running == 1 && wl > imbalance)
                        continue;
 
                if (wl > max_load) {
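
With this hunk the busiest-queue search compares loads in capacity-relative terms: the raw weighted load is scaled by SCHED_LOAD_SCALE and divided by the cpu's power, so a cpu whose capacity has been eaten by RT activity appears proportionally busier. A quick sanity check with made-up numbers (1024 scale and the 441 power figure from the earlier example are assumptions):

#include <stdio.h>

int main(void)
{
	unsigned long weighted_load = 512;	/* raw rq load.weight */
	unsigned long full_power    = 1024;	/* unscaled cpu */
	unsigned long reduced_power = 441;	/* cpu busy with RT tasks */

	printf("wl on full cpu:    %lu\n", weighted_load * 1024 / full_power);		/* 512  */
	printf("wl on reduced cpu: %lu\n", weighted_load * 1024 / reduced_power);	/* 1188 */
	return 0;
}
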
@@ -5413,7 +5354,7 @@ need_resched:
        preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
-       rcu_qsctr_inc(cpu);
+       rcu_sched_qs(cpu);
        prev = rq->curr;
        switch_count = &prev->nivcsw;
 
@@ -6701,6 +6642,8 @@ int __cond_resched_lock(spinlock_t *lock)
        int resched = should_resched();
        int ret = 0;
 
+       lockdep_assert_held(lock);
+
        if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
                if (resched)
@@ -7151,6 +7094,11 @@ fail:
        return ret;
 }
 
+#define RCU_MIGRATION_IDLE     0
+#define RCU_MIGRATION_NEED_QS  1
+#define RCU_MIGRATION_GOT_QS   2
+#define RCU_MIGRATION_MUST_SYNC        3
+
 /*
  * migration_thread - this is a highprio system thread that performs
  * thread migration by bumping thread off CPU then 'pushing' onto
@@ -7158,6 +7106,7 @@ fail:
  */
 static int migration_thread(void *data)
 {
+       int badcpu;
        int cpu = (long)data;
        struct rq *rq;
 
@@ -7192,8 +7141,17 @@ static int migration_thread(void *data)
                req = list_entry(head->next, struct migration_req, list);
                list_del_init(head->next);
 
-               spin_unlock(&rq->lock);
-               __migrate_task(req->task, cpu, req->dest_cpu);
+               if (req->task != NULL) {
+                       spin_unlock(&rq->lock);
+                       __migrate_task(req->task, cpu, req->dest_cpu);
+               } else if (likely(cpu == (badcpu = smp_processor_id()))) {
+                       req->dest_cpu = RCU_MIGRATION_GOT_QS;
+                       spin_unlock(&rq->lock);
+               } else {
+                       req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
+                       spin_unlock(&rq->lock);
+                       WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
+               }
                local_irq_enable();
 
                complete(&req->done);
@@ -7772,7 +7730,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        break;
                }
 
-               if (!group->__cpu_power) {
+               if (!group->cpu_power) {
                        printk(KERN_CONT "\n");
                        printk(KERN_ERR "ERROR: domain->cpu_power not "
                                        "set\n");
@@ -7796,9 +7754,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
                printk(KERN_CONT " %s", str);
-               if (group->__cpu_power != SCHED_LOAD_SCALE) {
-                       printk(KERN_CONT " (__cpu_power = %d)",
-                               group->__cpu_power);
+               if (group->cpu_power != SCHED_LOAD_SCALE) {
+                       printk(KERN_CONT " (cpu_power = %d)",
+                               group->cpu_power);
                }
 
                group = group->next;
@@ -7863,9 +7821,7 @@ static int sd_degenerate(struct sched_domain *sd)
        }
 
        /* Following flags don't use groups */
-       if (sd->flags & (SD_WAKE_IDLE |
-                        SD_WAKE_AFFINE |
-                        SD_WAKE_BALANCE))
+       if (sd->flags & (SD_WAKE_AFFINE))
                return 0;
 
        return 1;
@@ -7882,10 +7838,6 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
        if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
                return 0;
 
-       /* Does parent contain flags not in child? */
-       /* WAKE_BALANCE is a subset of WAKE_AFFINE */
-       if (cflags & SD_WAKE_AFFINE)
-               pflags &= ~SD_WAKE_BALANCE;
        /* Flags needing groups don't count if only 1 group in parent */
        if (parent->groups == parent->groups->next) {
                pflags &= ~(SD_LOAD_BALANCE |
@@ -8083,7 +8035,7 @@ init_sched_build_groups(const struct cpumask *span,
                        continue;
 
                cpumask_clear(sched_group_cpus(sg));
-               sg->__cpu_power = 0;
+               sg->cpu_power = 0;
 
                for_each_cpu(j, span) {
                        if (group_fn(j, cpu_map, NULL, tmpmask) != group)
@@ -8341,7 +8293,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
                                continue;
                        }
 
-                       sg_inc_cpu_power(sg, sd->groups->__cpu_power);
+                       sg->cpu_power += sd->groups->cpu_power;
                }
                sg = sg->next;
        } while (sg != group_head);
@@ -8378,7 +8330,7 @@ static int build_numa_sched_groups(struct s_data *d,
                sd->groups = sg;
        }
 
-       sg->__cpu_power = 0;
+       sg->cpu_power = 0;
        cpumask_copy(sched_group_cpus(sg), d->nodemask);
        sg->next = sg;
        cpumask_or(d->covered, d->covered, d->nodemask);
@@ -8401,7 +8353,7 @@ static int build_numa_sched_groups(struct s_data *d,
                               "Can not alloc domain group for node %d\n", j);
                        return -ENOMEM;
                }
-               sg->__cpu_power = 0;
+               sg->cpu_power = 0;
                cpumask_copy(sched_group_cpus(sg), d->tmpmask);
                sg->next = prev->next;
                cpumask_or(d->covered, d->covered, d->tmpmask);
@@ -8479,17 +8431,23 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 
        child = sd->child;
 
-       sd->groups->__cpu_power = 0;
+       sd->groups->cpu_power = 0;
 
        if (!child) {
                power = SCHED_LOAD_SCALE;
                weight = cpumask_weight(sched_domain_span(sd));
                /*
                 * SMT siblings share the power of a single core.
+                * Usually multiple threads get a better yield out of
+                * that one core than a single thread would have,
+                * reflect that in sd->smt_gain.
                 */
-               if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1)
+               if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
+                       power *= sd->smt_gain;
                        power /= weight;
-               sg_inc_cpu_power(sd->groups, power);
+                       power >>= SCHED_LOAD_SHIFT;
+               }
+               sd->groups->cpu_power += power;
                return;
        }
 
@@ -8498,7 +8456,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
         */
        group = child->groups;
        do {
-               sg_inc_cpu_power(sd->groups, group->__cpu_power);
+               sd->groups->cpu_power += group->cpu_power;
                group = group->next;
        } while (group != child->groups);
 }
@@ -8565,10 +8523,10 @@ static void set_domain_attribute(struct sched_domain *sd,
                request = attr->relax_domain_level;
        if (request < sd->level) {
                /* turn off idle balance on this domain */
-               sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
+               sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
        } else {
                /* turn on idle balance on this domain */
-               sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
+               sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
        }
 }
 
@@ -10760,3 +10718,113 @@ struct cgroup_subsys cpuacct_subsys = {
        .subsys_id = cpuacct_subsys_id,
 };
 #endif /* CONFIG_CGROUP_CPUACCT */
+
+#ifndef CONFIG_SMP
+
+int rcu_expedited_torture_stats(char *page)
+{
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+void synchronize_sched_expedited(void)
+{
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#else /* #ifndef CONFIG_SMP */
+
+static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
+static DEFINE_MUTEX(rcu_sched_expedited_mutex);
+
+#define RCU_EXPEDITED_STATE_POST -2
+#define RCU_EXPEDITED_STATE_IDLE -1
+
+static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+
+int rcu_expedited_torture_stats(char *page)
+{
+       int cnt = 0;
+       int cpu;
+
+       cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
+       for_each_online_cpu(cpu) {
+                cnt += sprintf(&page[cnt], " %d:%d",
+                               cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
+       }
+       cnt += sprintf(&page[cnt], "\n");
+       return cnt;
+}
+EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
+
+static long synchronize_sched_expedited_count;
+
+/*
+ * Wait for an rcu-sched grace period to elapse, but use "big hammer"
+ * approach to force grace period to end quickly.  This consumes
+ * significant time on all CPUs, and is thus not recommended for
+ * any sort of common-case code.
+ *
+ * Note that it is illegal to call this function while holding any
+ * lock that is acquired by a CPU-hotplug notifier.  Failing to
+ * observe this restriction will result in deadlock.
+ */
+void synchronize_sched_expedited(void)
+{
+       int cpu;
+       unsigned long flags;
+       bool need_full_sync = 0;
+       struct rq *rq;
+       struct migration_req *req;
+       long snap;
+       int trycount = 0;
+
+       smp_mb();  /* ensure prior mod happens before capturing snap. */
+       snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
+       get_online_cpus();
+       while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
+               put_online_cpus();
+               if (trycount++ < 10)
+                       udelay(trycount * num_online_cpus());
+               else {
+                       synchronize_sched();
+                       return;
+               }
+               if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
+                       smp_mb(); /* ensure test happens before caller kfree */
+                       return;
+               }
+               get_online_cpus();
+       }
+       rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
+       for_each_online_cpu(cpu) {
+               rq = cpu_rq(cpu);
+               req = &per_cpu(rcu_migration_req, cpu);
+               init_completion(&req->done);
+               req->task = NULL;
+               req->dest_cpu = RCU_MIGRATION_NEED_QS;
+               spin_lock_irqsave(&rq->lock, flags);
+               list_add(&req->list, &rq->migration_queue);
+               spin_unlock_irqrestore(&rq->lock, flags);
+               wake_up_process(rq->migration_thread);
+       }
+       for_each_online_cpu(cpu) {
+               rcu_expedited_state = cpu;
+               req = &per_cpu(rcu_migration_req, cpu);
+               rq = cpu_rq(cpu);
+               wait_for_completion(&req->done);
+               spin_lock_irqsave(&rq->lock, flags);
+               if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
+                       need_full_sync = 1;
+               req->dest_cpu = RCU_MIGRATION_IDLE;
+               spin_unlock_irqrestore(&rq->lock, flags);
+       }
+       rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
+       mutex_unlock(&rcu_sched_expedited_mutex);
+       put_online_cpus();
+       if (need_full_sync)
+               synchronize_sched();
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#endif /* #else #ifndef CONFIG_SMP */
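
For context, a caller of the new primitive would use it exactly like synchronize_sched(), just on paths that can afford to hammer every CPU in exchange for a short grace period. A hypothetical in-kernel sketch, assuming the declaration is visible via <linux/rcupdate.h> (illustrative only, not part of this patch):

#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	int data;
};

/* caller holds the lock protecting writers to the list */
static void remove_item(struct item *it)
{
	list_del_rcu(&it->list);	/* unlink; sched-RCU readers may still see it */
	synchronize_sched_expedited();	/* fast grace period, expensive on all CPUs */
	kfree(it);			/* no pre-existing reader can reference it now */
}

Per the comment above synchronize_sched_expedited(), such a caller must not hold any lock that a CPU-hotplug notifier acquires, since the function takes the hotplug lock itself.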