/*
 *  linux/arch/parisc/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *  Modifications for ARM (C) 1994, 1995, 1996, 1997  Russell King
 *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02  Alan Modra
 *	fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *	"A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>

#include <linux/timex.h>

static unsigned long clocktick __read_mostly;	/* timer cycles per tick */

/*
 * We keep time on PA-RISC Linux by using the Interval Timer, which is
 * a pair of registers, one read-only and one write-only, both accessed
 * through CR16.  The read-only register is 32 or 64 bits wide and
 * increments by 1 every CPU clock tick.  The architecture only
 * guarantees a rate between 0.5 and 2 increments per clock, but all
 * implementations use a rate of 1.  The write-only register is 32 bits
 * wide.  When the lowest 32 bits of the read-only register compare
 * equal to the write-only register, a maskable external interrupt is
 * raised.  Each processor has an Interval Timer of its own, and the
 * timers are not synchronised between processors.
 *
 * We want to generate an interrupt every 1/HZ seconds, so we program
 * CR16 to interrupt every @clocktick cycles.  The it_value in cpu_data
 * holds the intended time of the next tick.  Interrupts may be disabled
 * for an arbitrarily long period, so we may miss one or more ticks.
 */
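
/*
 * For illustration, the cycles-per-tick value used throughout this file
 * follows from the boot-time calibration performed in time_init() below:
 * PAGE0->mem_10msec is the number of CR16 cycles in 10 ms, so 100 times
 * that is cycles per second, and dividing by HZ gives cycles per tick.
 * A minimal sketch of that arithmetic; the helper name is hypothetical
 * and the 250 MHz / HZ == 250 figures are only example numbers.
 */
static inline unsigned long example_cycles_per_tick(void)
{
        /* e.g. mem_10msec == 2500000 on a 250 MHz CR16:
         * 100 * 2500000 = 250000000 cycles per second, and
         * 250000000 / 250 (HZ) = 1000000 cycles per tick.
         */
        return (100 * PAGE0->mem_10msec) / HZ;
}
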
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
        unsigned long now;
        unsigned long next_tick;
        unsigned long cycles_elapsed, ticks_elapsed;
        unsigned long cycles_remainder;
        unsigned int cpu = smp_processor_id();
        struct cpuinfo_parisc *cpuinfo = &cpu_data[cpu];

        /* gcc can optimize for the "read-only" case with a local clocktick */
        unsigned long cpt = clocktick;

        profile_tick(CPU_PROFILING);

        /* Initialize next_tick to the expected tick time. */
        next_tick = cpuinfo->it_value;

        /* Get the current interval timer.
         * CR16 reads as 64 bits in CPU wide mode.
         * CR16 reads as 32 bits in CPU narrow mode.
         */
        now = mfctl(16);

        cycles_elapsed = now - next_tick;

        if ((cycles_elapsed >> 5) < cpt) {
                /* use "cheap" math (add/subtract) instead
                 * of the more expensive div/mul method
                 */
                cycles_remainder = cycles_elapsed;
                ticks_elapsed = 1;
                while (cycles_remainder > cpt) {
                        cycles_remainder -= cpt;
                        ticks_elapsed++;
                }
        } else {
                cycles_remainder = cycles_elapsed % cpt;
                ticks_elapsed = 1 + cycles_elapsed / cpt;
        }

        /* Can we differentiate between "early CR16" (aka Scenario 1) and
         * "long delay" (aka Scenario 3)?  I don't think so.
         *
         * We expect timer_interrupt to be delivered at least a few hundred
         * cycles after the IT fires, but it is arbitrary how much time passes
         * before we call it "late".  I've picked one second.
         */
        if (ticks_elapsed > HZ) {
                /* Scenario 3: very long delay?  bad in any case */
                printk(KERN_CRIT "timer_interrupt(CPU %d): delayed!"
                        " cycles %lX rem %lX"
                        " next/now %lX/%lX\n",
                        cpu,
                        cycles_elapsed, cycles_remainder,
                        next_tick, now);
        }

        /* convert from "division remainder" to "remainder of clock tick" */
        cycles_remainder = cpt - cycles_remainder;

        /* Determine when (in CR16 cycles) the next IT interrupt will fire.
         * We want IT to fire modulo clocktick even if we miss/skip some.
         * But those interrupts don't in fact get delivered that regularly.
         */
        next_tick = now + cycles_remainder;

        cpuinfo->it_value = next_tick;

        /* Skip one clocktick on purpose if we are likely to miss next_tick.
         * We want to avoid the new next_tick being less than CR16.
         * If that happened, the itimer wouldn't fire until CR16 wrapped.
         * We'll catch the tick we missed on the tick after that.
         */
        if (!(cycles_remainder >> 13))
                next_tick += cpt;

        /* Program the IT with the time of the next interrupt.
         * Only the bottom 32 bits of next_tick are written to CR16.
         */
        mtctl(next_tick, 16);

        /* Done mucking with unreliable delivery of interrupts.
         * Go do system housekeeping.
         */

        if (!--cpuinfo->prof_counter) {
                cpuinfo->prof_counter = cpuinfo->prof_multiplier;
                update_process_times(user_mode(get_irq_regs()));
        }

        if (cpu == 0) {
                write_seqlock(&xtime_lock);
                do_timer(ticks_elapsed);
                write_sequnlock(&xtime_lock);
        }

        /* check soft power switch status */
        if (cpu == 0 && !atomic_read(&power_tasklet.count))
                tasklet_schedule(&power_tasklet);

        return IRQ_HANDLED;
}

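/*
 * The remainder computation in timer_interrupt() trades a divide for a
 * short subtraction loop when the interrupt arrived less than roughly 32
 * ticks late.  A self-contained sketch of the same idea follows; the
 * helper name is hypothetical and nothing in this file calls it.
 */
static inline unsigned long example_ticks_and_remainder(unsigned long elapsed,
                                unsigned long cpt, unsigned long *remainder)
{
        unsigned long ticks = 1;

        if ((elapsed >> 5) < cpt) {
                /* elapsed < ~32 * cpt: at most ~32 subtractions. */
                while (elapsed > cpt) {
                        elapsed -= cpt;
                        ticks++;
                }
                *remainder = elapsed;
        } else {
                /* Rare case: fall back to the full divide/modulo. */
                *remainder = elapsed % cpt;
                ticks += elapsed / cpt;
        }
        return ticks;
}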

unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        /* The PSW is kept in the gr[0] save slot.  If the nullify bit is
         * set, the instruction at pc will not execute, so credit the
         * sample to the preceding instruction.
         */
        if (regs->gr[0] & PSW_N)
                pc -= 4;

#ifdef CONFIG_SMP
        /* Inside the locking primitives, report the caller instead:
         * gr[2] is the return pointer (rp).
         */
        if (in_lock_functions(pc))
                pc = regs->gr[2];
#endif

        return pc;
}
EXPORT_SYMBOL(profile_pc);


/*
 * Return the number of microseconds that have elapsed since the last
 * update to wall time (aka xtime).  xtime_lock must be at least
 * read-locked when calling this routine.
 */
static inline unsigned long gettimeoffset (void)
{
#ifndef CONFIG_SMP
        /*
         * FIXME: This won't work on SMP because jiffies are updated by CPU 0.
         * Once parisc-linux learns the cr16 difference between processors,
         * this could be made to work.
         */
        unsigned long now;
        unsigned long prev_tick;
        unsigned long next_tick;
        unsigned long elapsed_cycles;
        unsigned long usec;
        unsigned long cpuid = smp_processor_id();
        unsigned long cpt = clocktick;

        next_tick = cpu_data[cpuid].it_value;
        now = mfctl(16);	/* Read the hardware interval timer. */

        prev_tick = next_tick - cpt;

        /* Assume Scenario 1: "now" is later than prev_tick. */
        elapsed_cycles = now - prev_tick;

        /* Approximate HZ with shifts.  The intended test is
         * "(elapsed / clocktick) > HZ".
         */
#if HZ == 1000
        if (elapsed_cycles > (cpt << 10) )
#elif HZ == 250
        if (elapsed_cycles > (cpt << 8) )
#elif HZ == 100
        if (elapsed_cycles > (cpt << 7) )
#else
#warning Unexpected HZ value; falling back to the exact (slower) comparison
        if (elapsed_cycles > (HZ * cpt) )
#endif
        {
                /* Scenario 3: clock ticks are missing. */
                printk(KERN_CRIT "gettimeoffset(CPU %ld): missing %ld ticks!"
                        " cycles %lX prev/now/next %lX/%lX/%lX clock %lX\n",
                        cpuid, elapsed_cycles / cpt,
                        elapsed_cycles, prev_tick, now, next_tick, cpt);
        }

        /* FIXME: Can we improve the precision?  Not with PAGE0. */
        usec = (elapsed_cycles * 10000) / PAGE0->mem_10msec;
        return usec;
#else
        return 0;
#endif
}

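/*
 * Two pieces of arithmetic above deserve a worked example.  The shift in
 * the HZ comparison approximates the multiply: for HZ == 1000,
 * "cpt << 10" is 1024 * cpt, close enough to 1000 * cpt for a sanity
 * check (likewise 256 for HZ == 250 and 128 for HZ == 100).  The final
 * conversion relies on PAGE0->mem_10msec being the number of CR16 cycles
 * in 10 ms, i.e. 10000 microseconds:
 *
 *	usec = elapsed_cycles * 10000 / mem_10msec
 *
 * A minimal sketch of that conversion follows.  The helper name is
 * hypothetical; note that the intermediate product can overflow a 32-bit
 * unsigned long for large cycle counts.
 */
static inline unsigned long example_cycles_to_usec(unsigned long cycles)
{
        return (cycles * 10000) / PAGE0->mem_10msec;
}
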
void
do_gettimeofday (struct timeval *tv)
{
        unsigned long flags, seq, usec, sec;

        /* Sample xtime and the interval-timer offset under the xtime_lock
         * seqlock, retrying if a writer updated it meanwhile.
         */
        do {
                seq = read_seqbegin_irqsave(&xtime_lock, flags);
                usec = gettimeoffset();
                sec = xtime.tv_sec;
                usec += (xtime.tv_nsec / 1000);
        } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

        /* Carry whole seconds out of usec. */
        while (usec >= USEC_PER_SEC) {
                usec -= USEC_PER_SEC;
                ++sec;
        }

        /* Return the adjusted result. */
        tv->tv_sec = sec;
        tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);

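/*
 * A hedged usage sketch: measuring a short interval in microseconds with
 * two do_gettimeofday() samples.  The helper name is hypothetical and
 * nothing in this file uses it.
 */
static inline long example_interval_usec(const struct timeval *start,
                                         const struct timeval *end)
{
        /* Assumes the interval is short enough for the result to fit in
         * a long.
         */
        return (end->tv_sec - start->tv_sec) * USEC_PER_SEC +
               (end->tv_usec - start->tv_usec);
}
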
int
do_settimeofday (struct timespec *tv)
{
        time_t wtm_sec, sec = tv->tv_sec;
        long wtm_nsec, nsec = tv->tv_nsec;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irq(&xtime_lock);
        {
                /*
                 * This is revolting.  We need to set "xtime"
                 * correctly.  However, the value in this location is
                 * the value at the most recent update of wall time.
                 * Discover what correction gettimeofday would have
                 * done, and then undo it!
                 */
                nsec -= gettimeoffset() * 1000;

                wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
                wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

                set_normalized_timespec(&xtime, sec, nsec);
                set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

                ntp_clear();
        }
        write_sequnlock_irq(&xtime_lock);
        clock_was_set();
        return 0;
}
EXPORT_SYMBOL(do_settimeofday);

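/*
 * Worked example of the correction in do_settimeofday(), with purely
 * illustrative numbers: suppose xtime was last updated at 100.000000 s
 * and 3000 us of CR16 cycles have elapsed since, so gettimeoffset()
 * would return 3000.  If userspace now sets the clock to 200.000000 s,
 * the code stores
 *
 *	new xtime = requested time - gettimeoffset()
 *	          = 200.000000 s - 0.003000 s = 199.997000 s
 *
 * so that a subsequent do_gettimeofday(), which adds the 3000 us offset
 * back on, reports 200.000000 s as requested.
 */
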
/*
 * XXX: We can do better than this.
 * Returns nanoseconds
 */

unsigned long long sched_clock(void)
{
        return (unsigned long long)jiffies * (1000000000 / HZ);
}


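/*
 * The conversion above gives sched_clock() only jiffy resolution: each
 * jiffy contributes 1000000000 / HZ nanoseconds, e.g. 4000000 ns (4 ms)
 * with HZ == 250, or 10000000 ns (10 ms) with HZ == 100 (illustrative
 * configurations only).  The cast to unsigned long long makes the
 * multiply happen in 64 bits so the product does not overflow as
 * jiffies grows.
 */
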
void __init start_cpu_itimer(void)
{
        unsigned int cpu = smp_processor_id();
        unsigned long next_tick = mfctl(16) + clocktick;

        mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */

        cpu_data[cpu].it_value = next_tick;
}

void __init time_init(void)
{
        static struct pdc_tod tod_data;

        clocktick = (100 * PAGE0->mem_10msec) / HZ;

        start_cpu_itimer();	/* get CPU 0 started */

        if (pdc_tod_read(&tod_data) == 0) {
                unsigned long flags;

                write_seqlock_irqsave(&xtime_lock, flags);
                xtime.tv_sec = tod_data.tod_sec;
                xtime.tv_nsec = tod_data.tod_usec * 1000;
                set_normalized_timespec(&wall_to_monotonic,
                                        -xtime.tv_sec, -xtime.tv_nsec);
                write_sequnlock_irqrestore(&xtime_lock, flags);
        } else {
                printk(KERN_ERR "Error reading tod clock\n");
                xtime.tv_sec = 0;
                xtime.tv_nsec = 0;
        }
}

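/*
 * A worked note on the initialisation above: monotonic time is derived
 * as xtime + wall_to_monotonic, so seeding wall_to_monotonic with the
 * negation of the boot-time TOD value makes monotonic time start near
 * zero.  With illustrative numbers, if the TOD clock reads 1000000000 s
 * at boot, wall_to_monotonic becomes -1000000000 s, and ten seconds
 * later xtime (1000000010 s) plus wall_to_monotonic yields 10 s of
 * uptime.
 */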