/*
 *  linux/arch/parisc/kernel/time.c
 *
 *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *  Modifications for ARM (C) 1994, 1995, 1996, 1997 Russell King
 *  Copyright (C) 1999 SuSE GmbH, (Philipp Rumpf, prumpf@tux.org)
 *
 * 1994-07-02  Alan Modra
 *             fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
 * 1998-12-20  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/clocksource.h>
#include <linux/platform_device.h>
#include <linux/ftrace.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/pdc.h>
#include <asm/led.h>

#include <linux/timex.h>

static unsigned long clocktick __read_mostly;	/* timer cycles per tick */

#ifndef CONFIG_64BIT
/*
 * The processor-internal cycle counter (Control Register 16) is used as the
 * time source for the sched_clock() function.  This register is 64 bits wide
 * on a 64-bit kernel and 32 bits wide on a 32-bit kernel.  Since sched_clock()
 * always requires a 64-bit counter, on the 32-bit kernel we emulate the upper
 * 32 bits with a per-cpu variable which we increment every time the counter
 * wraps around (which happens roughly every ~4 seconds).
 */
static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits);
#endif
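
/*
 * A rough sense of that wrap interval (illustrative figures only; the real
 * clock rate comes from PAGE0->mem_10msec in time_init() below): a 32-bit
 * CR16 wraps after 2^32 cycles, which at clock rates in the 800 MHz - 1 GHz
 * range works out to roughly 4-5 seconds, e.g. 2^32 / 1 GHz ~= 4.29 s.
 */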

/*
 * We keep time on PA-RISC Linux by using the Interval Timer which is
 * a pair of registers; one is read-only and one is write-only; both
 * accessed through CR16.  The read-only register is 32 or 64 bits wide,
 * and increments by 1 every CPU clock tick.  The architecture only
 * guarantees us a rate between 0.5 and 2, but all implementations use a
 * rate of 1.  The write-only register is 32-bits wide.  When the lowest
 * 32 bits of the read-only register compare equal to the write-only
 * register, it raises a maskable external interrupt.  Each processor has
 * an Interval Timer of its own and they are not synchronised.
 *
 * We want to generate an interrupt every 1/HZ seconds.  So we program
 * CR16 to interrupt every @clocktick cycles.  The it_value in cpu_data
 * is programmed with the intended time of the next tick.  We can be
 * held off for an arbitrarily long period of time by interrupts being
 * disabled, so we may miss one or more ticks.
 */
irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
{
	unsigned long now, now2;
	unsigned long next_tick;
	unsigned long cycles_elapsed, ticks_elapsed = 1;
	unsigned long cycles_remainder;
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);

	/* gcc can optimize for "read-only" case with a local clocktick */
	unsigned long cpt = clocktick;

	profile_tick(CPU_PROFILING);

	/* Initialize next_tick to the expected tick time. */
	next_tick = cpuinfo->it_value;

	/* Get current cycle counter (Control Register 16). */
	now = mfctl(16);

	cycles_elapsed = now - next_tick;

	if ((cycles_elapsed >> 6) < cpt) {
		/* use "cheap" math (add/subtract) instead
		 * of the more expensive div/mul method
		 */
		cycles_remainder = cycles_elapsed;
		while (cycles_remainder > cpt) {
			cycles_remainder -= cpt;
			ticks_elapsed++;
		}
	} else {
		/* TODO: Reduce this to one fdiv op */
		cycles_remainder = cycles_elapsed % cpt;
		ticks_elapsed += cycles_elapsed / cpt;
	}
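
	/*
	 * Note on the branch above: (cycles_elapsed >> 6) < cpt means fewer
	 * than roughly 64 clockticks' worth of cycles have elapsed, so the
	 * subtraction loop runs at most ~63 times, which is cheaper here
	 * than the div/mod path; for longer delays div/mod is used instead.
	 */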

	/* convert from "division remainder" to "remainder of clock tick" */
	cycles_remainder = cpt - cycles_remainder;

	/* Determine when (in CR16 cycles) the next IT interrupt will fire.
	 * We want IT to fire modulo clocktick even if we miss/skip some.
	 * But those interrupts don't in fact get delivered that regularly.
	 */
	next_tick = now + cycles_remainder;

	cpuinfo->it_value = next_tick;

	/* Program the IT when to deliver the next interrupt.
	 * Only the bottom 32 bits of next_tick are writable in CR16!
	 */
	mtctl(next_tick, 16);

#if !defined(CONFIG_64BIT)
	/* check for overflow on a 32-bit kernel (every ~4 seconds). */
	if (unlikely(next_tick < now))
		this_cpu_inc(cr16_high_32_bits);
#endif

	/* Skip one clocktick on purpose if we missed next_tick.
	 * The new CR16 must be "later" than the current CR16, otherwise
	 * the itimer would not fire until CR16 wrapped - e.g. 4 seconds
	 * later on a 1 GHz processor.  We'll account for the missed
	 * tick on the next timer interrupt.
	 *
	 * "next_tick - now" will always give the difference regardless
	 * of which one wrapped.  If "now" is "bigger" we'll end up with
	 * a very large unsigned number.
	 */
	now2 = mfctl(16);
	if (next_tick - now2 > cpt)
		mtctl(next_tick + cpt, 16);

#if 1
/*
 * GGG: DEBUG code for how many cycles programming CR16 used.
 */
	if (unlikely(now2 - now > 0x3000))	/* 12K cycles */
		printk(KERN_CRIT "timer_interrupt(CPU %d): SLOW! 0x%lx cycles!"
			" cyc %lX  rem %lX "
			" next/now %lX/%lX\n",
			cpu, now2 - now, cycles_elapsed, cycles_remainder,
			next_tick, now);
#endif

	/* Can we differentiate between "early CR16" (aka Scenario 1) and
	 * "long delay" (aka Scenario 3)?  I don't think so.
	 *
	 * timer_interrupt will be delivered at least a few hundred cycles
	 * after the IT fires.  But it's arbitrary how much time passes
	 * before we call it "late".  I've picked one second.
	 *
	 * It's important that NO printk's sit between reading CR16 and
	 * setting up the next value; they may introduce huge variance.
	 */
	if (unlikely(ticks_elapsed > HZ)) {
		/* Scenario 3: very long delay?  bad in any case */
		printk(KERN_CRIT "timer_interrupt(CPU %d): delayed!"
			" cycles %lX rem %lX "
			" next/now %lX/%lX\n",
			cpu,
			cycles_elapsed, cycles_remainder,
			next_tick, now);
	}

	/* Done mucking with unreliable delivery of interrupts.
	 * Go do system housekeeping.
	 */

	if (!--cpuinfo->prof_counter) {
		cpuinfo->prof_counter = cpuinfo->prof_multiplier;
		update_process_times(user_mode(get_irq_regs()));
	}

	if (cpu == 0)
		xtime_update(ticks_elapsed);

	return IRQ_HANDLED;
}


unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (regs->gr[0] & PSW_N)
		pc -= 4;

#ifdef CONFIG_SMP
	if (in_lock_functions(pc))
		pc = regs->gr[2];
#endif

	return pc;
}
EXPORT_SYMBOL(profile_pc);


/* clock source code */

static cycle_t read_cr16(struct clocksource *cs)
{
	return get_cycles();
}

static struct clocksource clocksource_cr16 = {
	.name			= "cr16",
	.rating			= 300,
	.read			= read_cr16,
	.mask			= CLOCKSOURCE_MASK(BITS_PER_LONG),
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
};

int update_cr16_clocksource(void)
{
	/* since the cr16 cycle counters are not synchronized across CPUs,
	   check whether we should switch to a safe clocksource: */
	if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
		clocksource_change_rating(&clocksource_cr16, 0);
		return 1;
	}

	return 0;
}

void __init start_cpu_itimer(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long next_tick = mfctl(16) + clocktick;

#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
	/* With multiple 64-bit CPUs online, the cr16's are not synchronized. */
	if (cpu != 0)
		clear_sched_clock_stable();
#endif

	mtctl(next_tick, 16);		/* kick off Interval Timer (CR16) */

	per_cpu(cpu_data, cpu).it_value = next_tick;
}

#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
{
	struct pdc_tod tod_data;

	memset(tm, 0, sizeof(*tm));
	if (pdc_tod_read(&tod_data) < 0)
		return -EOPNOTSUPP;

	/* we treat tod_sec as unsigned, so this can work until year 2106 */
	rtc_time64_to_tm(tod_data.tod_sec, tm);
	return rtc_valid_tm(tm);
}

static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
{
	time64_t secs = rtc_tm_to_time64(tm);

	if (pdc_tod_set(secs, 0) < 0)
		return -EOPNOTSUPP;

	return 0;
}

static const struct rtc_class_ops rtc_generic_ops = {
	.read_time = rtc_generic_get_time,
	.set_time = rtc_generic_set_time,
};

static int __init rtc_init(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
					     &rtc_generic_ops,
					     sizeof(rtc_generic_ops));

	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(rtc_init);
#endif

void read_persistent_clock(struct timespec *ts)
{
	static struct pdc_tod tod_data;

	if (pdc_tod_read(&tod_data) == 0) {
		ts->tv_sec = tod_data.tod_sec;
		ts->tv_nsec = tod_data.tod_usec * 1000;
	} else {
		printk(KERN_ERR "Error reading tod clock\n");
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}


/*
 * sched_clock() framework
 */

static u32 cyc2ns_mul __read_mostly;
static u32 cyc2ns_shift __read_mostly;

u64 sched_clock(void)
{
	u64 now;

	/* Get current cycle counter (Control Register 16). */
#ifdef CONFIG_64BIT
	now = mfctl(16);
#else
	now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32);
#endif

	/* return the value in ns (cycles_2_ns) */
	return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift);
}
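
/*
 * Rough worked example of the cycles->ns conversion above (illustrative
 * numbers only, not taken from real hardware): time_init() below asks
 * clocks_calc_mult_shift() for a cyc2ns_mul/cyc2ns_shift pair such that
 * cyc2ns_mul / 2^cyc2ns_shift ~= NSEC_PER_MSEC / current_cr16_khz, i.e.
 * the number of nanoseconds per CR16 cycle.  Assuming a 250 MHz CR16
 * (250000 kHz), that ratio is 4 ns/cycle, so sched_clock() effectively
 * returns (cycles * cyc2ns_mul) >> cyc2ns_shift ~= cycles * 4 ns.
 */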

/*
 * timer interrupt and sched_clock() initialization
 */

void __init time_init(void)
{
	unsigned long current_cr16_khz;

	current_cr16_khz = PAGE0->mem_10msec/10;	/* kHz */
	clocktick = (100 * PAGE0->mem_10msec) / HZ;
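
	/*
	 * PAGE0->mem_10msec is the firmware-reported number of CR16 cycles
	 * in 10 ms, so clocktick above is the number of CR16 cycles per
	 * 1/HZ-second tick.  Purely illustrative example (not real firmware
	 * values): on a 250 MHz CPU, mem_10msec = 2,500,000, giving
	 * current_cr16_khz = 250,000 and, with HZ = 250, clocktick =
	 * 100 * 2,500,000 / 250 = 1,000,000 cycles between timer interrupts.
	 */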

	/* calculate mult/shift values for cr16 */
	clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz,
				NSEC_PER_MSEC, 0);

	start_cpu_itimer();	/* get CPU 0 started */

	/* register at clocksource framework */
	clocksource_register_khz(&clocksource_cr16, current_cr16_khz);
}