sched: x86, track TSC-unstable events
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index 602660df455c7676b2fd9400f628831b8b3d716c..ea63a30ca3e88daa1bca2975f3c9f98812d7dffc 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -4,6 +4,7 @@
  * See comments there for proper credits.
  */
 
+#include <linux/sched.h>
 #include <linux/clocksource.h>
 #include <linux/workqueue.h>
 #include <linux/cpufreq.h>
@@ -18,6 +19,8 @@
 
 #include "mach_timer.h"
 
+static int tsc_enabled;
+
 /*
  * On some systems the TSC frequency does not
  * change with the cpu frequency. So we need
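The new tsc_enabled flag complements the existing tsc_unstable flag, so the
code can distinguish three states: a fully trusted TSC, a TSC good enough
only for the scheduler clock, and a TSC disabled outright at boot. A minimal
sketch of the test sched_clock() performs below (the helper name is
hypothetical; the logic mirrors the patched condition):

	/* Hypothetical helper mirroring the patched sched_clock() check:
	 * read the TSC unless it is both not enabled and not merely
	 * unstable, i.e. unless it was disabled at boot (tsc_disable). */
	static inline int sched_clock_uses_tsc(int tsc_enabled, int tsc_unstable)
	{
		return tsc_enabled || tsc_unstable;
	}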
@@ -104,8 +107,13 @@ unsigned long long sched_clock(void)
 
        /*
         * Fall back to jiffies if there's no TSC available:
+        * ( But note that we still use it if the TSC is marked
+        *   unstable. We do this because unlike Time Of Day,
+        *   the scheduler clock tolerates small errors and it's
+        *   very important for it to be as fast as the platform
+        *   can achieve it. )
         */
-       if (unlikely(tsc_disable))
+       if (unlikely(!tsc_enabled && !tsc_unstable))
                /* No locking but a rare wrong value is not a big deal: */
                return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
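When sched_clock() does take the jiffies fallback, its resolution collapses
to a whole tick, which is exactly why the comment above accepts even an
unstable TSC. A userspace toy evaluating the same expression (HZ and the
jiffy delta are invented for illustration):

	#include <stdio.h>
	#include <stdint.h>

	#define HZ 250			/* illustrative tick rate */

	int main(void)
	{
		/* stands in for jiffies_64 - INITIAL_JIFFIES */
		uint64_t delta = 3;
		uint64_t ns = delta * (1000000000ULL / HZ);

		/* 4 ms per tick at HZ=250: far too coarse for scheduling */
		printf("%llu ns, %.1f ms granularity\n",
		       (unsigned long long)ns, 1000.0 / HZ);
		return 0;
	}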
@@ -198,13 +206,10 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
 {
        struct cpufreq_freqs *freq = data;
 
-       if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
-               write_seqlock_irq(&xtime_lock);
-
        if (!ref_freq) {
                if (!freq->old){
                        ref_freq = freq->new;
-                       goto end;
+                       return 0;
                }
                ref_freq = freq->old;
                loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
@@ -231,13 +236,10 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
                                 * TSC based sched_clock turns
                                 * to junk w/ cpufreq
                                 */
-                               mark_tsc_unstable();
+                               mark_tsc_unstable("cpufreq changes");
                        }
                }
        }
-end:
-       if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
-               write_sequnlock_irq(&xtime_lock);
 
        return 0;
 }
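The two hunks above drop the xtime_lock seqlock that used to bracket the
notifier body (the rescaling no longer needs to be serialized against the
timekeeping core here), replace the goto with a direct return, and make the
cpufreq path record why the TSC was given up on. The rescaling itself, not
visible in the hunk context, goes through the kernel's cpufreq_scale()
helper; a standalone simplification of its old * mult / div semantics (this
copy is illustrative, not the kernel implementation):

	/* Rescale a value calibrated at frequency 'div' to frequency
	 * 'mult', as cpufreq_scale(old, div, mult) does: old * mult / div.
	 * Applied to loops_per_jiffy and cpu_khz when cpufreq retunes. */
	static unsigned long scale_freq(unsigned long old,
					unsigned int div, unsigned int mult)
	{
		return (unsigned long)(((unsigned long long)old * mult) / div);
	}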
@@ -279,10 +281,13 @@ static struct clocksource clocksource_tsc = {
                                  CLOCK_SOURCE_MUST_VERIFY,
 };
 
-void mark_tsc_unstable(void)
+void mark_tsc_unstable(char *reason)
 {
+       sched_clock_unstable_event();
        if (!tsc_unstable) {
                tsc_unstable = 1;
+               tsc_enabled = 0;
+               printk("Marking TSC unstable due to: %s.\n", reason);
                /* Can be called before registration */
                if (clocksource_tsc.mult)
                        clocksource_change_rating(&clocksource_tsc, 0);
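With the reason argument, every path that gives up on the TSC now leaves a
breadcrumb in the kernel log, and the rating drop to 0 makes the clocksource
core deselect the TSC. An illustrative call site (not part of this patch;
later mainline callers pass strings much like this one):

	/* e.g. from idle code, when deep C-states stop the TSC: */
	mark_tsc_unstable("TSC halts in idle");
	/* dmesg: "Marking TSC unstable due to: TSC halts in idle." */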
@@ -383,7 +388,9 @@ void __init tsc_init(void)
        if (check_tsc_unstable()) {
                clocksource_tsc.rating = 0;
                clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
-       }
+       } else
+               tsc_enabled = 1;
+
        clocksource_register(&clocksource_tsc);
 
        return;
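tsc_init() now registers the TSC either way: an unstable TSC stays in the
clocksource list at rating 0, where it can never win selection, while a
stable one additionally enables the fast sched_clock() path via tsc_enabled.
For reference, the rating bands the clocksource core documents (paraphrased
from the kerneldoc in include/linux/clocksource.h):

	/*
	 * Clocksource rating bands:
	 *   1-99:    unfit for real use; bootup and testing only
	 *   100-199: base level usability; functional but not desired
	 *   200-299: good; a correct and usable clocksource
	 *   300-399: desired; reasonably fast and accurate
	 *   400-499: perfect; the ideal clocksource, a must-use
	 *
	 * Rating 0 sits below every band, so a demoted TSC remains
	 * registered but is never chosen by the core.
	 */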