sched: Weaken SD_POWERSAVINGS_BALANCE
diff --git a/kernel/sched.c b/kernel/sched.c
index e27a53685ed9cb4a24d9e1b58650a32161e0a0ea..f0ccb8b926c806985b346a790f8965294abe78b4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
  */
 #define RUNTIME_INF    ((u64)~0ULL)
 
-static void double_rq_lock(struct rq *rq1, struct rq *rq2);
-
 static inline int rt_policy(int policy)
 {
        if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
@@ -514,14 +512,6 @@ struct root_domain {
 #ifdef CONFIG_SMP
        struct cpupri cpupri;
 #endif
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-       /*
-        * Preferred wake up cpu nominated by sched_mc balance that will be
-        * used when most cpus are idle in the system indicating overall very
-        * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2)
-        */
-       unsigned int sched_mc_preferred_wakeup_cpu;
-#endif
 };
 
 /*
@@ -1509,8 +1499,65 @@ static int tg_nop(struct task_group *tg, void *data)
 #endif
 
 #ifdef CONFIG_SMP
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
+/* Used instead of source_load when we know the type == 0 */
+static unsigned long weighted_cpuload(const int cpu)
+{
+       return cpu_rq(cpu)->load.weight;
+}
+
+/*
+ * Return a low guess at the load of a migration-source cpu weighted
+ * according to the scheduling class and "nice" value.
+ *
+ * We want to under-estimate the load of migration sources, to
+ * balance conservatively.
+ */
+static unsigned long source_load(int cpu, int type)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long total = weighted_cpuload(cpu);
+
+       if (type == 0 || !sched_feat(LB_BIAS))
+               return total;
+
+       return min(rq->cpu_load[type-1], total);
+}
+
+/*
+ * Return a high guess at the load of a migration-target cpu weighted
+ * according to the scheduling class and "nice" value.
+ */
+static unsigned long target_load(int cpu, int type)
+{
+       struct rq *rq = cpu_rq(cpu);
+       unsigned long total = weighted_cpuload(cpu);
+
+       if (type == 0 || !sched_feat(LB_BIAS))
+               return total;
+
+       return max(rq->cpu_load[type-1], total);
+}
+
+static struct sched_group *group_of(int cpu)
+{
+       struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
+
+       if (!sd)
+               return NULL;
+
+       return sd->groups;
+}
+
+static unsigned long power_of(int cpu)
+{
+       struct sched_group *group = group_of(cpu);
+
+       if (!group)
+               return SCHED_LOAD_SCALE;
+
+       return group->cpu_power;
+}
+
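
To see what the min/max biasing above buys, here is a stand-alone user-space sketch (the load arrays and values are invented for illustration, not kernel data): source_load() takes the lower of the decayed load history and the instantaneous load, target_load() the higher, so the balancer under-estimates migration sources, over-estimates targets, and errs toward leaving tasks where they are.

#include <stdio.h>

#define NR_CPUS 2
#define NR_IDX  3

/* invented per-cpu decayed load history, index 0 = most recent */
static unsigned long cpu_load[NR_CPUS][NR_IDX] = {
        { 900, 1100, 1300 },
        { 400,  500,  600 },
};
/* invented instantaneous load, standing in for weighted_cpuload() */
static unsigned long instant_load[NR_CPUS] = { 1000, 450 };

/* low guess for a migration source: min(history, instantaneous) */
static unsigned long source_load_sketch(int cpu, int type)
{
        unsigned long total = instant_load[cpu];

        if (type == 0)
                return total;
        return cpu_load[cpu][type - 1] < total ? cpu_load[cpu][type - 1] : total;
}

/* high guess for a migration target: max(history, instantaneous) */
static unsigned long target_load_sketch(int cpu, int type)
{
        unsigned long total = instant_load[cpu];

        if (type == 0)
                return total;
        return cpu_load[cpu][type - 1] > total ? cpu_load[cpu][type - 1] : total;
}

int main(void)
{
        /* source cpu0: min(900, 1000) = 900; target cpu1: max(400, 450) = 450 */
        printf("source cpu0: %lu, target cpu1: %lu\n",
               source_load_sketch(0, 1), target_load_sketch(1, 1));
        return 0;
}
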
 static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
 
 static unsigned long cpu_avg_load_per_task(int cpu)
@@ -1695,6 +1742,8 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #ifdef CONFIG_PREEMPT
 
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
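
double_rq_lock(), whose forward declaration moves here, takes both runqueue locks in a fixed global order so that two CPUs locking the same pair cannot deadlock. A minimal pthreads sketch of that ordering discipline (a user-space stand-in, assuming the usual lock-by-ascending-address convention, not the kernel implementation):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Take two locks in ascending address order; a consistent global
 * order rules out AB-BA deadlock between concurrent lockers. */
static void double_lock_sketch(pthread_mutex_t *a, pthread_mutex_t *b)
{
        if (a == b) {
                pthread_mutex_lock(a);
        } else if ((uintptr_t)a < (uintptr_t)b) {
                pthread_mutex_lock(a);
                pthread_mutex_lock(b);
        } else {
                pthread_mutex_lock(b);
                pthread_mutex_lock(a);
        }
}

int main(void)
{
        pthread_mutex_t rq1 = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t rq2 = PTHREAD_MUTEX_INITIALIZER;

        double_lock_sketch(&rq1, &rq2);
        printf("both runqueue locks held\n");
        pthread_mutex_unlock(&rq2);
        pthread_mutex_unlock(&rq1);
        return 0;
}
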
 /*
  * fair double_lock_balance: Safely acquires both rq->locks in a fair
  * way at the expense of forcing extra atomic operations in all
@@ -1959,13 +2008,6 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 }
 
 #ifdef CONFIG_SMP
-
-/* Used instead of source_load when we know the type == 0 */
-static unsigned long weighted_cpuload(const int cpu)
-{
-       return cpu_rq(cpu)->load.weight;
-}
-
 /*
  * Is this task likely cache-hot:
  */
@@ -2239,185 +2281,6 @@ void kick_process(struct task_struct *p)
        preempt_enable();
 }
 EXPORT_SYMBOL_GPL(kick_process);
-
-/*
- * Return a low guess at the load of a migration-source cpu weighted
- * according to the scheduling class and "nice" value.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static unsigned long source_load(int cpu, int type)
-{
-       struct rq *rq = cpu_rq(cpu);
-       unsigned long total = weighted_cpuload(cpu);
-
-       if (type == 0 || !sched_feat(LB_BIAS))
-               return total;
-
-       return min(rq->cpu_load[type-1], total);
-}
-
-/*
- * Return a high guess at the load of a migration-target cpu weighted
- * according to the scheduling class and "nice" value.
- */
-static unsigned long target_load(int cpu, int type)
-{
-       struct rq *rq = cpu_rq(cpu);
-       unsigned long total = weighted_cpuload(cpu);
-
-       if (type == 0 || !sched_feat(LB_BIAS))
-               return total;
-
-       return max(rq->cpu_load[type-1], total);
-}
-
-/*
- * find_idlest_group finds and returns the least busy CPU group within the
- * domain.
- */
-static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
-{
-       struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
-       unsigned long min_load = ULONG_MAX, this_load = 0;
-       int load_idx = sd->forkexec_idx;
-       int imbalance = 100 + (sd->imbalance_pct-100)/2;
-
-       do {
-               unsigned long load, avg_load;
-               int local_group;
-               int i;
-
-               /* Skip over this group if it has no CPUs allowed */
-               if (!cpumask_intersects(sched_group_cpus(group),
-                                       &p->cpus_allowed))
-                       continue;
-
-               local_group = cpumask_test_cpu(this_cpu,
-                                              sched_group_cpus(group));
-
-               /* Tally up the load of all CPUs in the group */
-               avg_load = 0;
-
-               for_each_cpu(i, sched_group_cpus(group)) {
-                       /* Bias balancing toward cpus of our domain */
-                       if (local_group)
-                               load = source_load(i, load_idx);
-                       else
-                               load = target_load(i, load_idx);
-
-                       avg_load += load;
-               }
-
-               /* Adjust by relative CPU power of the group */
-               avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
-
-               if (local_group) {
-                       this_load = avg_load;
-                       this = group;
-               } else if (avg_load < min_load) {
-                       min_load = avg_load;
-                       idlest = group;
-               }
-       } while (group = group->next, group != sd->groups);
-
-       if (!idlest || 100*this_load < imbalance*min_load)
-               return NULL;
-       return idlest;
-}
-
-/*
- * find_idlest_cpu - find the idlest cpu among the cpus in group.
- */
-static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
-{
-       unsigned long load, min_load = ULONG_MAX;
-       int idlest = -1;
-       int i;
-
-       /* Traverse only the allowed CPUs */
-       for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
-               load = weighted_cpuload(i);
-
-               if (load < min_load || (load == min_load && i == this_cpu)) {
-                       min_load = load;
-                       idlest = i;
-               }
-       }
-
-       return idlest;
-}
-
-/*
- * sched_balance_self: balance the current task (running on cpu) in domains
- * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
- * SD_BALANCE_EXEC.
- *
- * Balance, ie. select the least loaded group.
- *
- * Returns the target CPU number, or the same CPU if no balancing is needed.
- *
- * preempt must be disabled.
- */
-static int sched_balance_self(int cpu, int flag)
-{
-       struct task_struct *t = current;
-       struct sched_domain *tmp, *sd = NULL;
-
-       for_each_domain(cpu, tmp) {
-               /*
-                * If power savings logic is enabled for a domain, stop there.
-                */
-               if (tmp->flags & SD_POWERSAVINGS_BALANCE)
-                       break;
-               if (tmp->flags & flag)
-                       sd = tmp;
-       }
-
-       if (sd)
-               update_shares(sd);
-
-       while (sd) {
-               struct sched_group *group;
-               int new_cpu, weight;
-
-               if (!(sd->flags & flag)) {
-                       sd = sd->child;
-                       continue;
-               }
-
-               group = find_idlest_group(sd, t, cpu);
-               if (!group) {
-                       sd = sd->child;
-                       continue;
-               }
-
-               new_cpu = find_idlest_cpu(group, t, cpu);
-               if (new_cpu == -1 || new_cpu == cpu) {
-                       /* Now try balancing at a lower domain level of cpu */
-                       sd = sd->child;
-                       continue;
-               }
-
-               /* Now try balancing at a lower domain level of new_cpu */
-               cpu = new_cpu;
-               weight = cpumask_weight(sched_domain_span(sd));
-               sd = NULL;
-               for_each_domain(cpu, tmp) {
-                       if (weight <= cpumask_weight(sched_domain_span(tmp)))
-                               break;
-                       if (tmp->flags & flag)
-                               sd = tmp;
-               }
-               /* while loop will break here if sd == NULL */
-       }
-
-       return cpu;
-}
-
 #endif /* CONFIG_SMP */
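
The deleted sched_balance_self() path above boils down to a two-level search: pick the least-loaded group within a domain, then the least-loaded allowed cpu inside that group, and repeat down the hierarchy. A toy, flat version of that search (invented loads, no domain hierarchy, no cpus_allowed masking, no imbalance_pct threshold):

#include <stdio.h>

#define NR_GROUPS 2
#define CPUS_PER_GROUP 2

/* invented per-cpu loads, grouped as two sched groups */
static unsigned long load[NR_GROUPS][CPUS_PER_GROUP] = {
        { 700, 300 },   /* group 0 */
        { 100, 200 },   /* group 1 */
};

int main(void)
{
        int g, c, best_group = 0, best_cpu = 0;
        unsigned long best = ~0UL;

        /* level 1: least-loaded group, as find_idlest_group() did */
        for (g = 0; g < NR_GROUPS; g++) {
                unsigned long sum = 0;

                for (c = 0; c < CPUS_PER_GROUP; c++)
                        sum += load[g][c];
                if (sum < best) {
                        best = sum;
                        best_group = g;
                }
        }

        /* level 2: least-loaded cpu in it, as find_idlest_cpu() did */
        best = ~0UL;
        for (c = 0; c < CPUS_PER_GROUP; c++) {
                if (load[best_group][c] < best) {
                        best = load[best_group][c];
                        best_cpu = c;
                }
        }

        printf("group %d, cpu %d\n", best_group, best_cpu);
        return 0;
}
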
 
 /**
@@ -2459,33 +2322,17 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 {
        int cpu, orig_cpu, this_cpu, success = 0;
        unsigned long flags;
-       long old_state;
        struct rq *rq;
 
        if (!sched_feat(SYNC_WAKEUPS))
                sync = 0;
 
-#ifdef CONFIG_SMP
-       if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
-               struct sched_domain *sd;
-
-               this_cpu = raw_smp_processor_id();
-               cpu = task_cpu(p);
-
-               for_each_domain(this_cpu, sd) {
-                       if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-                               update_shares(sd);
-                               break;
-                       }
-               }
-       }
-#endif
+       this_cpu = get_cpu();
 
        smp_wmb();
        rq = task_rq_lock(p, &flags);
        update_rq_clock(rq);
-       old_state = p->state;
-       if (!(old_state & state))
+       if (!(p->state & state))
                goto out;
 
        if (p->se.on_rq)
@@ -2493,27 +2340,25 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 
        cpu = task_cpu(p);
        orig_cpu = cpu;
-       this_cpu = smp_processor_id();
 
 #ifdef CONFIG_SMP
        if (unlikely(task_running(rq, p)))
                goto out_activate;
 
-       cpu = p->sched_class->select_task_rq(p, sync);
-       if (cpu != orig_cpu) {
+       /*
+        * In order to handle concurrent wakeups and release the rq->lock
+        * we put the task in TASK_WAKING state.
+        */
+       p->state = TASK_WAKING;
+       task_rq_unlock(rq, &flags);
+
+       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, sync);
+       if (cpu != orig_cpu)
                set_task_cpu(p, cpu);
-               task_rq_unlock(rq, &flags);
-               /* might preempt at this point */
-               rq = task_rq_lock(p, &flags);
-               old_state = p->state;
-               if (!(old_state & state))
-                       goto out;
-               if (p->se.on_rq)
-                       goto out_running;
 
-               this_cpu = smp_processor_id();
-               cpu = task_cpu(p);
-       }
+       rq = task_rq_lock(p, &flags);
+       WARN_ON(p->state != TASK_WAKING);
+       cpu = task_cpu(p);
 
 #ifdef CONFIG_SCHEDSTATS
        schedstat_inc(rq, ttwu_count);
@@ -2571,6 +2416,7 @@ out_running:
 #endif
 out:
        task_rq_unlock(rq, &flags);
+       put_cpu();
 
        return success;
 }
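
The new TASK_WAKING handshake lets select_task_rq() run without rq->lock held while still serializing concurrent wakers: the first waker flips the task to TASK_WAKING before dropping the lock, and any racing waker fails the state-mask test and backs off. A user-space analogue of the protocol (one mutex standing in for rq->lock; the state constants are illustrative, not the kernel's actual values):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

#define TASK_RUNNING            0x0
#define TASK_INTERRUPTIBLE      0x1
#define TASK_WAKING             0x4

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static int task_state = TASK_INTERRUPTIBLE;

static int try_to_wake_up_sketch(int state_mask)
{
        pthread_mutex_lock(&rq_lock);
        if (!(task_state & state_mask)) {
                /* a concurrent waker already claimed the task */
                pthread_mutex_unlock(&rq_lock);
                return 0;
        }
        task_state = TASK_WAKING;       /* fence out other wakers */
        pthread_mutex_unlock(&rq_lock);

        /* ... cpu selection runs here without the lock held ... */

        pthread_mutex_lock(&rq_lock);
        assert(task_state == TASK_WAKING);      /* mirrors the WARN_ON() */
        task_state = TASK_RUNNING;
        pthread_mutex_unlock(&rq_lock);
        return 1;
}

int main(void)
{
        printf("first wakeup: %d\n", try_to_wake_up_sketch(TASK_INTERRUPTIBLE));
        printf("second wakeup: %d\n", try_to_wake_up_sketch(TASK_INTERRUPTIBLE));
        return 0;
}
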
@@ -2674,11 +2520,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
 
        __sched_fork(p);
 
-#ifdef CONFIG_SMP
-       cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
-#endif
-       set_task_cpu(p, cpu);
-
        /*
         * Make sure we do not leak PI boosting priority to the child.
         */
@@ -2709,6 +2550,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
        if (!rt_prio(p->prio))
                p->sched_class = &fair_sched_class;
 
+#ifdef CONFIG_SMP
+       cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+#endif
+       set_task_cpu(p, cpu);
+
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        if (likely(sched_info_on()))
                memset(&p->sched_info, 0, sizeof(p->sched_info));
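
Presumably the SD_BALANCE_FORK placement moves below the sched_class assignment because the balancing hook is now reached through p->sched_class, which must be valid before it is dereferenced. A tiny sketch of that ordering constraint (hypothetical user-space types, not the kernel's):

#include <stdio.h>

struct sched_class_sketch {
        int (*select_task_rq)(int cpu);
};

static int fair_select(int cpu) { return cpu; }

static const struct sched_class_sketch fair_class = {
        .select_task_rq = fair_select,
};

struct task_sketch {
        const struct sched_class_sketch *sched_class;
};

int main(void)
{
        struct task_sketch p = { 0 };

        /* Calling p.sched_class->select_task_rq() here, before the
         * class is chosen, would dereference a NULL pointer. */
        p.sched_class = &fair_class;    /* choose the class first */
        printf("cpu %d\n", p.sched_class->select_task_rq(0));
        return 0;
}
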
@@ -3263,7 +3109,7 @@ out:
 void sched_exec(void)
 {
        int new_cpu, this_cpu = get_cpu();
-       new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
+       new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
        put_cpu();
        if (new_cpu != this_cpu)
                sched_migrate_task(current, new_cpu);
@@ -3683,11 +3529,6 @@ static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
        *imbalance = sds->min_load_per_task;
        sds->busiest = sds->group_min;
 
-       if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
-               cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
-                       group_first_cpu(sds->group_leader);
-       }
-
        return 1;
 
 }
@@ -4161,26 +4002,6 @@ ret:
        return NULL;
 }
 
-static struct sched_group *group_of(int cpu)
-{
-       struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
-
-       if (!sd)
-               return NULL;
-
-       return sd->groups;
-}
-
-static unsigned long power_of(int cpu)
-{
-       struct sched_group *group = group_of(cpu);
-
-       if (!group)
-               return SCHED_LOAD_SCALE;
-
-       return group->cpu_power;
-}
-
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
@@ -8000,9 +7821,7 @@ static int sd_degenerate(struct sched_domain *sd)
        }
 
        /* Following flags don't use groups */
-       if (sd->flags & (SD_WAKE_IDLE |
-                        SD_WAKE_AFFINE |
-                        SD_WAKE_BALANCE))
+       if (sd->flags & (SD_WAKE_AFFINE))
                return 0;
 
        return 1;
@@ -8019,10 +7838,6 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
        if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
                return 0;
 
-       /* Does parent contain flags not in child? */
-       /* WAKE_BALANCE is a subset of WAKE_AFFINE */
-       if (cflags & SD_WAKE_AFFINE)
-               pflags &= ~SD_WAKE_BALANCE;
        /* Flags needing groups don't count if only 1 group in parent */
        if (parent->groups == parent->groups->next) {
                pflags &= ~(SD_LOAD_BALANCE |
@@ -8708,10 +8523,10 @@ static void set_domain_attribute(struct sched_domain *sd,
                request = attr->relax_domain_level;
        if (request < sd->level) {
                /* turn off idle balance on this domain */
-               sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
+               sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
        } else {
                /* turn on idle balance on this domain */
-               sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
+               sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
        }
 }
 