Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 12 Aug 2016 20:51:52 +0000 (13:51 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 12 Aug 2016 20:51:52 +0000 (13:51 -0700)
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes: cputime fixes, two deadline scheduler fixes and a cgroups
  scheduling fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cputime: Fix omitted ticks passed in parameter
  sched/cputime: Fix steal time accounting
  sched/deadline: Fix lock pinning warning during CPU hotplug
  sched/cputime: Mitigate performance regression in times()/clock_gettime()
  sched/fair: Fix typo in sync_throttle()
  sched/deadline: Fix wrap-around in DL heap

kernel/sched/core.c
kernel/sched/cpudeadline.c
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/fair.c

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5c883fe8e44016df1109e8f66dd73377dfecb5e9..2a906f20fba7c4ba63d89fe87ac3527607be453a 100644
@@ -74,6 +74,7 @@
 #include <linux/context_tracking.h>
 #include <linux/compiler.h>
 #include <linux/frame.h>
+#include <linux/prefetch.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -2971,6 +2972,23 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
+/*
+ * The function fair_sched_class.update_curr accesses the struct curr
+ * and its field curr->exec_start; when called from task_sched_runtime(),
+ * we observe a high rate of cache misses in practice.
+ * Prefetching this data results in improved performance.
+ */
+static inline void prefetch_curr_exec_start(struct task_struct *p)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+       struct sched_entity *curr = (&p->se)->cfs_rq->curr;
+#else
+       struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
+#endif
+       prefetch(curr);
+       prefetch(&curr->exec_start);
+}
+
 /*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
@@ -3005,6 +3023,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
         * thread, breaking clock_gettime().
         */
        if (task_current(rq, p) && task_on_rq_queued(p)) {
+               prefetch_curr_exec_start(p);
                update_rq_clock(rq);
                p->sched_class->update_curr(rq);
        }
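
For context: update_curr() dereferences the cfs_rq's current entity and reads curr->exec_start, and when task_sched_runtime() is the caller those cache lines are usually cold. The helper added above warms them early, so the loads overlap the rq clock update. A minimal userspace sketch of the same pattern, using __builtin_prefetch() (which the kernel's prefetch() typically compiles down to; the struct layout here is hypothetical):

    #include <stdio.h>

    /* Hypothetical layout: the padding stands in for the many sched_entity
     * fields that can push exec_start onto a different cache line than the
     * start of the struct -- which is why two prefetches are issued. */
    struct entity {
        char pad[128];
        unsigned long long exec_start;
    };

    static inline void prefetch_entity(struct entity *curr)
    {
        __builtin_prefetch(curr);              /* line holding the struct head */
        __builtin_prefetch(&curr->exec_start); /* line holding exec_start      */
    }

    int main(void)
    {
        struct entity e = { .exec_start = 42 };

        prefetch_entity(&e);
        /* Unrelated work here (update_rq_clock() in the real code) hides
         * the memory latency; by the time exec_start is read it is hot. */
        printf("%llu\n", e.exec_start);
        return 0;
    }
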
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5be58820465cced6c0d1dc06c9de146bddcf664f..d4184498c9f5e3c8674015f97fe04da2417dafbd 100644
@@ -168,7 +168,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 
        if (old_idx == IDX_INVALID) {
                cp->size++;
-               cp->elements[cp->size - 1].dl = 0;
+               cp->elements[cp->size - 1].dl = dl;
                cp->elements[cp->size - 1].cpu = cpu;
                cp->elements[cpu].idx = cp->size - 1;
                cpudl_change_key(cp, cp->size - 1, dl);
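
For context: deadlines are u64 timestamps compared with a signed-difference test so that wrap-around is handled, and the old code briefly gave a new heap element the key 0 before cpudl_change_key() installed the real one, so the up-versus-down heapify decision could be made against a bogus key. A standalone sketch of the comparison (mirroring the kernel's dl_time_before()) shows how a zero key misbehaves near the wrap point:

    #include <stdio.h>
    #include <stdint.h>

    /* Signed-difference comparison, as the kernel's dl_time_before() does:
     * "a before b" holds even across a u64 wrap, as long as the two values
     * are less than 2^63 apart. */
    static inline int dl_time_before(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) < 0;
    }

    int main(void)
    {
        uint64_t near_wrap = UINT64_MAX - 10;   /* deadline just before wrap */
        uint64_t wrapped   = 5;                 /* deadline just after wrap  */

        /* Correct: near_wrap precedes wrapped despite being numerically larger. */
        printf("%d\n", dl_time_before(near_wrap, wrapped));   /* prints 1 */

        /* A transient 0 key compares as *after* deadlines near the wrap
         * point, so heapify can move the element the wrong way. */
        printf("%d\n", dl_time_before(0, near_wrap));         /* prints 0 */
        return 0;
    }
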
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 1934f658c03604272e5809f32fee1a6a3c928990..9858266fb0b32b07158b2b9e8f66e7fa2f482eb7 100644
@@ -508,13 +508,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
  */
 void account_idle_ticks(unsigned long ticks)
 {
+       cputime_t cputime, steal;
 
        if (sched_clock_irqtime) {
                irqtime_account_idle_ticks(ticks);
                return;
        }
 
-       account_idle_time(jiffies_to_cputime(ticks));
+       cputime = jiffies_to_cputime(ticks);
+       steal = steal_account_process_time(cputime);
+
+       if (steal >= cputime)
+               return;
+
+       cputime -= steal;
+       account_idle_time(cputime);
 }
 
 /*
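
For context: in a virtualized guest, some of the ticks a CPU spent "idle" were really stolen by the hypervisor, and steal_account_process_time() accounts that portion separately; the fix above subtracts it so the same time is not double-counted as idle. A toy userspace model of the arithmetic (the helper and its bookkeeping are stand-ins, not the kernel API):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t cputime_t;

    /* Stand-in for steal_account_process_time(): consume up to 'maxtime'
     * of pending steal time and return how much was actually accounted. */
    static cputime_t account_steal_time(cputime_t *pending, cputime_t maxtime)
    {
        cputime_t steal = *pending < maxtime ? *pending : maxtime;
        *pending -= steal;
        return steal;
    }

    int main(void)
    {
        cputime_t pending_steal = 3;   /* ticks the hypervisor stole    */
        cputime_t idle          = 10;  /* ticks that elapsed while idle */

        cputime_t steal = account_steal_time(&pending_steal, idle);
        if (steal < idle)
            idle -= steal;
        else
            idle = 0;

        /* 3 ticks go to steal, only the remaining 7 to idle. */
        printf("idle=%llu steal=%llu\n",
               (unsigned long long)idle, (unsigned long long)steal);
        return 0;
    }
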
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fcb7f0217ff48610cca9bd5bd078f2f05df79164..1ce8867283dcde6e35ef74a72a1bca968decb918 100644
@@ -658,8 +658,11 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
         *
         * XXX figure out if select_task_rq_dl() deals with offline cpus.
         */
-       if (unlikely(!rq->online))
+       if (unlikely(!rq->online)) {
+               lockdep_unpin_lock(&rq->lock, rf.cookie);
                rq = dl_task_offline_migration(rq, p);
+               rf.cookie = lockdep_pin_lock(&rq->lock);
+       }
 
        /*
         * Queueing this task back might have overloaded rq, check if we need
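
For context: lockdep "pins" rq->lock with a cookie to catch code that drops the lock while callers assume it stays held; dl_task_offline_migration() can drop and re-take the lock (and return a different rq), so the hunk above brackets it with an unpin/re-pin. A toy userspace model of the cookie discipline (the structs and helpers are illustrative, not the lockdep API):

    #include <assert.h>
    #include <stdio.h>

    /* Toy model: pinning a held lock hands back a cookie; only the matching
     * cookie may unpin it, and a pinned lock must not be dropped. This
     * mirrors lockdep_pin_lock()/lockdep_unpin_lock(). */
    struct lock {
        int held;
        unsigned int pin_cookie;   /* 0 == not pinned */
        unsigned int next_cookie;
    };

    static unsigned int pin(struct lock *l)
    {
        assert(l->held && !l->pin_cookie);
        return l->pin_cookie = ++l->next_cookie;
    }

    static void unpin(struct lock *l, unsigned int cookie)
    {
        assert(l->held && l->pin_cookie == cookie);  /* mismatch == warning */
        l->pin_cookie = 0;
    }

    /* May drop and re-take the lock, like dl_task_offline_migration(). */
    static struct lock *migrate(struct lock *l)
    {
        assert(!l->pin_cookie);   /* dropping a pinned lock would splat */
        l->held = 0;              /* drop ...        */
        l->held = 1;              /* ... and re-take */
        return l;
    }

    int main(void)
    {
        struct lock rq_lock = { .held = 1 };
        unsigned int cookie = pin(&rq_lock);

        unpin(&rq_lock, cookie);     /* lockdep_unpin_lock(&rq->lock, ...)  */
        struct lock *rq = migrate(&rq_lock);
        cookie = pin(rq);            /* rf.cookie = lockdep_pin_lock(...)   */

        unpin(rq, cookie);
        puts("no pinning warning");
        return 0;
    }
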
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4088eedea7637859844c777dfa56dfb23136c142..039de34f15216d19f61386b6d6c66744660516c9 100644
@@ -4269,7 +4269,7 @@ static void sync_throttle(struct task_group *tg, int cpu)
        pcfs_rq = tg->parent->cfs_rq[cpu];
 
        cfs_rq->throttle_count = pcfs_rq->throttle_count;
-       pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+       cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
 }
 
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
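
For context: while a group is throttled, its task clock stays frozen at the throttled_clock_task snapshot, so the typo fixed above, which wrote the snapshot into the parent instead of the newly onlined child, warped the parent's frozen clock. A toy model of that freeze (a simplification of fair.c's clock logic, not verbatim kernel code):

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified model: a throttled group's task clock stays frozen at the
     * snapshot taken when it was throttled. */
    struct cfs_rq_model {
        int throttle_count;
        uint64_t throttled_clock_task;   /* snapshot while throttled */
    };

    static uint64_t clock_task(const struct cfs_rq_model *cfs, uint64_t now)
    {
        return cfs->throttle_count ? cfs->throttled_clock_task : now;
    }

    int main(void)
    {
        struct cfs_rq_model parent = { .throttle_count = 1,
                                       .throttled_clock_task = 100 };
        struct cfs_rq_model child  = { 0 };
        uint64_t now = 250;

        /* Correct sync: the child inherits the count and takes its OWN
         * snapshot; the buggy line instead overwrote the parent's snapshot
         * with 'now', jumping the parent's frozen clock from 100 to 250. */
        child.throttle_count = parent.throttle_count;
        child.throttled_clock_task = now;

        printf("parent=%llu child=%llu\n",
               (unsigned long long)clock_task(&parent, now),
               (unsigned long long)clock_task(&child, now));
        return 0;
    }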