/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/paravirt.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>

#include "fsyscall_gtod_data.h"

static cycle_t itc_get_cycles(struct clocksource *cs);

struct fsyscall_gtod_data_t fsyscall_gtod_data = {
        .lock = SEQLOCK_UNLOCKED,
};

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

#ifdef CONFIG_PARAVIRT
/* We need to define a real function for sched_clock, to override the
   weak default version */
unsigned long long sched_clock(void)
{
        return paravirt_sched_clock();
}
#endif

#ifdef CONFIG_PARAVIRT
static void
paravirt_clocksource_resume(struct clocksource *cs)
{
        if (pv_time_ops.clocksource_resume)
                pv_time_ops.clocksource_resume();
}
#endif

static struct clocksource clocksource_itc = {
        .name           = "itc",
        .rating         = 350,
        .read           = itc_get_cycles,
        .mask           = CLOCKSOURCE_MASK(64),
        .mult           = 0, /* to be calculated */
        .shift          = 16,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
#ifdef CONFIG_PARAVIRT
        .resume         = paravirt_clocksource_resume,
#endif
};
static struct clocksource *itc_clocksource;
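/*
 * Rough sketch of how the clocksource above is consumed (by the
 * generic timekeeping code, not in this file): elapsed nanoseconds
 * are computed as (delta_cycles * mult) >> shift.  With .shift = 16
 * and, say, a 300MHz ITC, ia64_init_itm() below would set mult to
 * roughly (10^9 << 16) / (3*10^8) ~= 218453, i.e. ~3.333ns per
 * cycle.  The rating of 350 lets the ITC win clocksource selection
 * unless it is demoted for drift (see ia64_init_itm()).
 */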
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#include <linux/kernel_stat.h>

extern cputime_t cycle_to_cputime(u64 cyc);
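/*
 * Background for the functions below: the low-level kernel
 * entry/exit and context-switch paths accumulate raw ITC cycles in
 * thread_info (ac_stamp is the last accounting timestamp,
 * ac_stime/ac_utime hold unconverted system/user cycle counts).
 * Conversion to cputime via cycle_to_cputime() (implemented
 * elsewhere for ia64) happens only at the accounting points here,
 * which keeps the hot paths cheap.
 */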
/*
 * Called from the context switch with interrupts disabled, to charge all
 * accumulated times to the current process, and to prepare accounting on
 * the next process.
 */
void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
{
        struct thread_info *pi = task_thread_info(prev);
        struct thread_info *ni = task_thread_info(next);
        cputime_t delta_stime, delta_utime;
        __u64 now;

        now = ia64_get_itc();

        delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
        if (idle_task(smp_processor_id()) != prev)
                account_system_time(prev, 0, delta_stime, delta_stime);
        else
                account_idle_time(delta_stime);

        if (pi->ac_utime) {
                delta_utime = cycle_to_cputime(pi->ac_utime);
                account_user_time(prev, delta_utime, delta_utime);
        }

        pi->ac_stamp = ni->ac_stamp = now;
        ni->ac_stime = ni->ac_utime = 0;
}

/*
 * Account time for a transition between system, hard irq or soft irq state.
 * Note that this function is called with interrupts enabled.
 */
void account_system_vtime(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        unsigned long flags;
        cputime_t delta_stime;
        __u64 now;

        local_irq_save(flags);

        now = ia64_get_itc();

        delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
        if (irq_count() || idle_task(smp_processor_id()) != tsk)
                account_system_time(tsk, 0, delta_stime, delta_stime);
        else
                account_idle_time(delta_stime);
        ti->ac_stime = 0;

        ti->ac_stamp = now;

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

/*
 * Called from the timer interrupt handler to charge accumulated user time
 * to the current process.  Must be called with interrupts disabled.
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
        struct thread_info *ti = task_thread_info(p);
        cputime_t delta_utime;

        if (ti->ac_utime) {
                delta_utime = cycle_to_cputime(ti->ac_utime);
                account_user_time(p, delta_utime, delta_utime);
                ti->ac_utime = 0;
        }
}

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
        unsigned long new_itm;

        if (unlikely(cpu_is_offline(smp_processor_id()))) {
                return IRQ_HANDLED;
        }

        platform_timer_interrupt(irq, dev_id);

        new_itm = local_cpu_data->itm_next;

        if (!time_after(ia64_get_itc(), new_itm))
                printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
                       ia64_get_itc(), new_itm);

        profile_tick(CPU_PROFILING);

        if (paravirt_do_steal_accounting(&new_itm))
                goto skip_process_time_accounting;

        while (1) {
                update_process_times(user_mode(get_irq_regs()));

                new_itm += local_cpu_data->itm_delta;

                if (smp_processor_id() == time_keeper_id) {
                        /*
                         * Here we are in the timer irq handler.  We have irqs locally
                         * disabled, but we don't know if the timer_bh is running on
                         * another CPU.  We need to avoid an SMP race by acquiring the
                         * xtime_lock.
                         */
                        write_seqlock(&xtime_lock);
                        do_timer(1);
                        local_cpu_data->itm_next = new_itm;
                        write_sequnlock(&xtime_lock);
                } else
                        local_cpu_data->itm_next = new_itm;

                if (time_after(new_itm, ia64_get_itc()))
                        break;

                /*
                 * Allow IPIs to interrupt the timer loop.
                 */
                local_irq_enable();
                local_irq_disable();
        }

skip_process_time_accounting:

        do {
                /*
                 * If we're too close to the next clock tick for
                 * comfort, we increase the safety margin by
                 * intentionally dropping the next tick(s).  We do NOT
                 * update itm.next because that would force us to call
                 * do_timer() which in turn would let our clock run
                 * too fast (with the potentially devastating effect
                 * of losing monotonicity of time).
                 */
                while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
                        new_itm += local_cpu_data->itm_delta;
                ia64_set_itm(new_itm);
                /* double check, in case we got hit by a (slow) PMI: */
        } while (time_after_eq(ia64_get_itc(), new_itm));
        return IRQ_HANDLED;
}
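/*
 * Worked example of the tick-skipping logic above, assuming the
 * 100MHz/3x fallback values from ia64_init_itm() (ITC at 300MHz) and
 * HZ=250: itm_delta is 1200000 cycles.  If, by the time the handler
 * is done, fewer than 600000 cycles (itm_delta/2) remain before
 * new_itm, whole ticks are dropped by advancing new_itm in
 * itm_delta steps.  The margin matters because programming an ITM
 * match value the ITC has already passed would leave the timer
 * silent; the trailing do/while re-checks for exactly that case.
 */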
/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
        int cpu = smp_processor_id();
        unsigned long shift = 0, delta;

        /* arrange for the cycle counter to generate a timer interrupt: */
        ia64_set_itv(IA64_TIMER_VECTOR);

        delta = local_cpu_data->itm_delta;
        /*
         * Stagger the timer tick for each CPU so they don't occur all at (almost) the
         * same time:
         */
        if (cpu) {
                unsigned long hi = 1UL << ia64_fls(cpu);
                shift = (2*(cpu - hi) + 1) * delta/hi/2;
        }
        local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
        ia64_set_itm(local_cpu_data->itm_next);
}
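/*
 * Example of the stagger computed above, with itm_delta = d:
 * cpu 1 (hi=1) ticks d/2 after cpu 0; cpus 2 and 3 (hi=2) tick at
 * d/4 and 3d/4; cpus 4-7 (hi=4) at d/8, 3d/8, 5d/8, 7d/8; and so
 * on.  Each power-of-two group bisects the gaps left by earlier
 * groups, so the ticks stay evenly spread no matter how many CPUs
 * are brought up.
 */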
static int nojitter;

static int __init nojitter_setup(char *str)
{
        nojitter = 1;
        printk("Jitter checking for ITC timers disabled\n");
        return 1;
}

__setup("nojitter", nojitter_setup);


void __devinit
ia64_init_itm (void)
{
        unsigned long platform_base_freq, itc_freq;
        struct pal_freq_ratio itc_ratio, proc_ratio;
        long status, platform_base_drift, itc_drift;

        /*
         * According to SAL v2.6, we need to use a SAL call to determine the platform base
         * frequency and then a PAL call to determine the frequency ratio between the ITC
         * and the base frequency.
         */
        status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
                                    &platform_base_freq, &platform_base_drift);
        if (status != 0) {
                printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
        } else {
                status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
                if (status != 0)
                        printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
        }
        if (status != 0) {
                /* invent "random" values */
                printk(KERN_ERR
                       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
                platform_base_freq = 100000000;
                platform_base_drift = -1;       /* no drift info */
                itc_ratio.num = 3;
                itc_ratio.den = 1;
        }
        if (platform_base_freq < 40000000) {
                printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
                       platform_base_freq);
                platform_base_freq = 75000000;
                platform_base_drift = -1;
        }
        if (!proc_ratio.den)
                proc_ratio.den = 1;     /* avoid division by zero */
        if (!itc_ratio.den)
                itc_ratio.den = 1;      /* avoid division by zero */

        itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;

        local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
        printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
               "ITC freq=%lu.%03luMHz", smp_processor_id(),
               platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
               itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

        if (platform_base_drift != -1) {
                itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
                printk("+/-%ldppm\n", itc_drift);
        } else {
                itc_drift = -1;
                printk("\n");
        }

        local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
        local_cpu_data->itc_freq = itc_freq;
        local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
        local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
                                        + itc_freq/2)/itc_freq;

        if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
                /* On IA64 in an SMP configuration ITCs are never accurately synchronized.
                 * Jitter compensation requires a cmpxchg which may limit
                 * the scalability of the syscalls for retrieving time.
                 * The ITC synchronization is usually successful to within a few
                 * ITC ticks, but this is not a sure thing.  If you need to improve
                 * timer performance in SMP situations, then boot the kernel with the
                 * "nojitter" option.  However, doing so may result in time fluctuating
                 * (maybe even going backward) if the ITC offsets between the individual
                 * CPUs are too large.
                 */
                if (!nojitter)
                        itc_jitter_data.itc_jitter = 1;
#endif
        } else
                /*
                 * The ITC is drifty and we have not synchronized the ITCs in smpboot.c.
                 * ITC values may fluctuate significantly between processors.
                 * The clock should not be used for hrtimers.  Mark the ITC as only
                 * useful for boot and testing.
                 *
                 * Note that jitter compensation is off!  There is no point in
                 * synchronizing ITCs when there may be large differentials
                 * that change over time.
                 *
                 * The only way to fix this would be to repeatedly sync the
                 * ITCs.  Until that time we have to avoid the ITC.
                 */
                clocksource_itc.rating = 50;

        paravirt_init_missing_ticks_accounting(smp_processor_id());

        /* avoid a softlockup message when a cpu is unplugged and plugged again */
        touch_softlockup_watchdog();

        /* Setup the CPU local timer tick */
        ia64_cpu_local_tick();

        if (!itc_clocksource) {
                /* Sort out mult/shift values: */
                clocksource_itc.mult =
                        clocksource_hz2mult(local_cpu_data->itc_freq,
                                            clocksource_itc.shift);
                clocksource_register(&clocksource_itc);
                itc_clocksource = &clocksource_itc;
        }
}
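/*
 * Sanity check of the fixed-point setup above, using the fallback
 * numbers (base 100MHz, ITC ratio 3/1, so itc_freq = 300MHz):
 * itm_delta = (300000000 + HZ/2) / HZ, cyc_per_usec = 300, and
 * nsec_per_cyc = ((10^9 << IA64_NSEC_PER_CYC_SHIFT) + 150000000)
 * / 300000000, i.e. 10/3 ns per cycle in IA64_NSEC_PER_CYC_SHIFT
 * fixed point.  All three are round-to-nearest divisions (the
 * "+ f/2" terms), which keeps cumulative conversion error small.
 */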
static cycle_t itc_get_cycles(struct clocksource *cs)
{
        unsigned long lcycle, now, ret;

        if (!itc_jitter_data.itc_jitter)
                return get_cycles();

        lcycle = itc_jitter_data.itc_lastcycle;
        now = get_cycles();
        if (lcycle && time_after(lcycle, now))
                return lcycle;

        /*
         * Keep track of the last timer value returned.
         * In an SMP environment, a reader can lose the cmpxchg race;
         * in that case cmpxchg returns the newer value that the
         * winner of the race stored.  Use that new value instead.
         */
        ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
        if (unlikely(ret != lcycle))
                return ret;

        return now;
}
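/*
 * The cmpxchg scheme above is a lock-free "last value" clock: every
 * reader publishes the cycle count it is about to return, and a
 * reader that observes a later published value returns that instead.
 * It trades one atomic operation per read for a guarantee that time
 * never steps backward across CPUs whose ITCs are offset by a few
 * ticks, which is why the "nojitter" boot parameter (which disables
 * all of this) is only worthwhile when the ITCs are known to be well
 * synchronized.
 */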
static struct irqaction timer_irqaction = {
        .handler =      timer_interrupt,
        .flags =        IRQF_DISABLED | IRQF_IRQPOLL,
        .name =         "timer"
};

static struct platform_device rtc_efi_dev = {
        .name = "rtc-efi",
        .id = -1,
};

static int __init rtc_init(void)
{
        if (platform_device_register(&rtc_efi_dev) < 0)
                printk(KERN_ERR "unable to register rtc device...\n");

        /* not necessarily an error */
        return 0;
}
module_init(rtc_init);
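/*
 * Registering the "rtc-efi" platform device hands wallclock duty to
 * the generic EFI RTC class driver (drivers/rtc/rtc-efi.c), which
 * reads the clock through the EFI get_time runtime service -- the
 * same source read_persistent_clock() below taps via
 * efi_gettimeofday().
 */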
void read_persistent_clock(struct timespec *ts)
{
        efi_gettimeofday(ts);
}

void __init
time_init (void)
{
        register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
        ia64_init_itm();
}
/*
 * Generic udelay assumes that if preemption is allowed and the thread
 * migrates to another CPU, that the ITC values are synchronized across
 * all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
        unsigned long start = ia64_get_itc();
        unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

        while (time_before(ia64_get_itc(), end))
                cpu_relax();
}

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
        (*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);
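/*
 * udelay() goes through the ia64_udelay function pointer so that a
 * platform whose ITCs are unsuitable for delay loops can install its
 * own implementation at boot; the default above simply spins until
 * the ITC has advanced by usecs * cyc_per_usec cycles.
 */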
/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
{
        unsigned long flags;

        write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);

        /* copy fsyscall clock data */
        fsyscall_gtod_data.clk_mask = c->mask;
        fsyscall_gtod_data.clk_mult = mult;
        fsyscall_gtod_data.clk_shift = c->shift;
        fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
        fsyscall_gtod_data.clk_cycle_last = c->cycle_last;

        /* copy kernel time structures */
        fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
        fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
        fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
                                                        + wall->tv_sec;
        fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
                                                        + wall->tv_nsec;

        /* normalize */
        while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
                fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
                fsyscall_gtod_data.monotonic_time.tv_sec++;
        }

        write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}
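/*
 * Everything copied above is the snapshot consumed by the ia64
 * fsyscall gettimeofday fast path: readers sample the sequence
 * counter of fsyscall_gtod_data.lock, copy the fields, and retry if
 * an update raced with them.  The normalization loop keeps
 * monotonic_time.tv_nsec in [0, NSEC_PER_SEC); since the sum of two
 * normalized tv_nsec fields is below 2*NSEC_PER_SEC, its body runs
 * at most once.
 */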