#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>

unsigned int cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
unsigned int tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !cpu_has_tsc processors */
static int tsc_disabled = -1;

/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	u64 this_offset;

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */
	if (unlikely(tsc_disabled)) {
		/* No locking but a rare wrong value is not a big deal: */
		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
	}

	/* read the Time Stamp Counter: */
	rdtscll(this_offset);

	/* return the value in ns */
	return cycles_2_ns(this_offset);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
			"cannot disable TSC completely.\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

#define MAX_RETRIES	5
#define SMI_THRESHOLD	50000

/*
 * Read TSC and the reference counters. Take care of SMI disturbance
 */
static u64 __init tsc_read_refs(u64 *pm, u64 *hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*pm = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_THRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}

/**
 * tsc_calibrate - calibrate the tsc on boot
 */
static unsigned int __init tsc_calibrate(void)
{
	unsigned long flags;
	u64 tsc1, tsc2, tr1, tr2, delta, pm1, pm2, hpet1, hpet2;
	int hpet = is_hpet_enabled();
	unsigned int tsc_khz_val = 0;

	local_irq_save(flags);

	tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);

	/* Enable the PIT channel 2 gate, disable the speaker output: */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

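	/*
	 * Program PIT channel 2 in mode 0 (interrupt on terminal count),
	 * binary, LSB/MSB access, and load a 50 ms countdown: with the
	 * PIT base clock of 1193182 Hz (CLOCK_TICK_RATE), the latch value
	 * CLOCK_TICK_RATE / (1000 / 50) = 1193182 / 20 = 59659 ticks.
	 */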
	outb(0xb0, 0x43);
	outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
	outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
	tr1 = get_cycles();
	/* Wait for the PIT output (port 0x61, bit 5) to go high: */
	while ((inb(0x61) & 0x20) == 0);
	tr2 = get_cycles();

	tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);

	local_irq_restore(flags);

	/*
	 * Preset the result with the raw and inaccurate PIT
	 * calibration value
	 */
	delta = (tr2 - tr1);
	do_div(delta, 50);	/* cycles in 50 ms / 50 = cycles per ms = kHz */
	tsc_khz_val = delta;

	/* hpet or pmtimer available ? */
	if (!hpet && !pm1 && !pm2) {
		printk(KERN_INFO "TSC calibrated against PIT\n");
		goto out;
	}

	/* Check whether the sampling was disturbed by an SMI */
	if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX) {
		printk(KERN_WARNING "TSC calibration disturbed by SMI, "
				"using PIT calibration result\n");
		goto out;
	}

	tsc2 = (tsc2 - tsc1) * 1000000LL;

	if (hpet) {
		printk(KERN_INFO "TSC calibrated against HPET\n");
		if (hpet2 < hpet1)
			hpet2 += 0x100000000ULL;
		hpet2 -= hpet1;
		tsc1 = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
		do_div(tsc1, 1000000);
	} else {
		printk(KERN_INFO "TSC calibrated against PM_TIMER\n");
		if (pm2 < pm1)
			pm2 += (u64)ACPI_PM_OVRRUN;
		pm2 -= pm1;
		tsc1 = pm2 * 1000000000LL;
		do_div(tsc1, PMTMR_TICKS_PER_SEC);
	}

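	/*
	 * tsc2 now holds elapsed TSC cycles * 10^6 and tsc1 the elapsed
	 * reference time in ns (HPET_PERIOD is in femtoseconds per tick,
	 * so ticks * period / 10^6 = ns; the PM timer ticks at
	 * PMTMR_TICKS_PER_SEC, so ticks * 10^9 / rate = ns). Hence
	 * tsc2 / tsc1 = cycles * 10^6 / ns = cycles per ms = kHz.
	 */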
	do_div(tsc2, tsc1);
	tsc_khz_val = tsc2;

out:
	return tsc_khz_val;
}

unsigned long native_calculate_cpu_khz(void)
{
	return tsc_calibrate();
}

#ifdef CONFIG_X86_32
/* Only called from the Powernow K7 cpu freq driver */
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

#endif /* CONFIG_X86_32 */

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *	ns = cycles / (freq / ns_per_sec)
 *	ns = cycles * (ns_per_sec / freq)
 *	ns = cycles * (10^9 / (cpu_khz * 10^3))
 *	ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *	ns = cycles * (10^6 * SC / cpu_khz) / SC
 *	ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use khz divisor instead of mhz to keep a better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *	-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
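
/*
 * Worked example: with cpu_khz = 2000000 (a 2 GHz CPU) and
 * CYC2NS_SCALE_FACTOR = 10 (SC = 2^10 = 1024):
 *
 *	cyc2ns_scale = 10^6 * 1024 / 2000000 = 512
 *	ns = cycles * 512 >> 10 = cycles / 2
 *
 * i.e. exactly 0.5 ns per cycle at 2 GHz. The per-cpu cyc2ns value set
 * below feeds the cycles_2_ns() helpers in <asm/timer.h>, which boil
 * down to roughly:
 *
 *	ns = cycles * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
 */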

DEFINE_PER_CPU(unsigned long, cyc2ns);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	unsigned long flags, *scale;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	scale = &per_cpu(cyc2ns, cpu);

	rdtscll(tsc_now);
	ns_now = __cycles_2_ns(tsc_now);

	if (cpu_khz)
		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;

	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
 * changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */
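
/*
 * Note: cpufreq_scale(ref, div, mult) rescales ref proportionally to
 * ref * mult / div. For example, with tsc_khz_ref = 2000000 captured
 * at ref_freq = 2000000 kHz, a transition to 1000000 kHz scales
 * tsc_khz down to 1000000, and loops_per_jiffy is scaled the same way.
 */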

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	/* the #ifdef selects which lpj the braceless if below rescales: */
	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
			(val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
			(val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	set_cyc2ns_scale(tsc_khz, freq->cpu);

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif /* CONFIG_CPU_FREQ */

/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 */
static cycle_t read_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles();

	return ret >= clocksource_tsc.cycle_last ?
		ret : clocksource_tsc.cycle_last;
}

static cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret = (cycle_t)vget_cycles();

	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
		ret : __vsyscall_gtod_data.clock.cycle_last;
}

static struct clocksource clocksource_tsc = {
	.name		= "tsc",
	.rating		= 300,
	.read		= read_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_MUST_VERIFY,
#ifdef CONFIG_X86_64
	.vread		= vread_tsc,
#endif
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk(KERN_WARNING "Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
			d->ident);
	tsc_unstable = 1;
	return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}
};

/*
 * Geode_LX - the OLPC CPU has a possibly very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
	unsigned long res_low, res_high;

	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	if (res_low & RTSC_SUSP)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			tsc_unstable = 1;
	}

	return tsc_unstable;
}

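/*
 * mult/shift worked example: clocksource_khz2mult() picks mult so that
 * ns = (cycles * mult) >> shift. With shift = 22 and tsc_khz = 2000000
 * (a 2 GHz TSC):
 *
 *	mult = (10^6 << 22) / 2000000 = 2^22 / 2 = 2097152
 *	ns = cycles * 2097152 >> 22 = cycles / 2
 *
 * which matches the cyc2ns math above.
 */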
static void __init init_tsc_clocksource(void)
{
	clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
			clocksource_tsc.shift);
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}
	clocksource_register(&clocksource_tsc);
}

void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	if (!cpu_has_tsc)
		return;

	cpu_khz = calculate_cpu_khz();
	tsc_khz = cpu_khz;

	if (!cpu_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		return;
	}

#ifdef CONFIG_X86_64
	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
			(boot_cpu_data.x86_vendor == X86_VENDOR_AMD))
		cpu_khz = calibrate_cpu();
#endif

	/* cycles per jiffy = (cycles per second) / HZ: */
	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	printk(KERN_INFO "Detected %lu.%03lu MHz processor.\n",
			(unsigned long)cpu_khz / 1000,
			(unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(cpu_khz, cpu);

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */
	tsc_disabled = 0;

	use_tsc_delay();
	/* Check and install the TSC clocksource */
	dmi_check_system(bad_tsc_dmi_table);

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_geode_tsc_reliable();
	init_tsc_clocksource();
}