/*
 *  linux/arch/arm/kernel/arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
11 #include <linux/init.h>
12 #include <linux/kernel.h>
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/smp.h>
16 #include <linux/cpu.h>
17 #include <linux/jiffies.h>
18 #include <linux/clockchips.h>
19 #include <linux/interrupt.h>
20 #include <linux/of_irq.h>
23 #include <asm/delay.h>
24 #include <asm/localtimer.h>
25 #include <asm/arch_timer.h>
26 #include <asm/sched_clock.h>
28 static u32 arch_timer_rate
;
38 static int arch_timer_ppi
[MAX_TIMER_PPI
];
40 static struct clock_event_device __percpu
**arch_timer_evt
;
41 static struct delay_timer arch_delay_timer
;
43 static bool arch_timer_use_virtual
= true;
/*
 * Architected system timer support.
 */
49 #define ARCH_TIMER_CTRL_ENABLE (1 << 0)
50 #define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
51 #define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
53 #define ARCH_TIMER_REG_CTRL 0
54 #define ARCH_TIMER_REG_FREQ 1
55 #define ARCH_TIMER_REG_TVAL 2
57 #define ARCH_TIMER_PHYS_ACCESS 0
58 #define ARCH_TIMER_VIRT_ACCESS 1
/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
65 static inline void arch_timer_reg_write(const int access
, const int reg
, u32 val
)
67 if (access
== ARCH_TIMER_PHYS_ACCESS
) {
69 case ARCH_TIMER_REG_CTRL
:
70 asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val
));
72 case ARCH_TIMER_REG_TVAL
:
73 asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val
));
78 if (access
== ARCH_TIMER_VIRT_ACCESS
) {
80 case ARCH_TIMER_REG_CTRL
:
81 asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val
));
83 case ARCH_TIMER_REG_TVAL
:
84 asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val
));
92 static inline u32
arch_timer_reg_read(const int access
, const int reg
)
96 if (access
== ARCH_TIMER_PHYS_ACCESS
) {
98 case ARCH_TIMER_REG_CTRL
:
99 asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val
));
101 case ARCH_TIMER_REG_TVAL
:
102 asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val
));
104 case ARCH_TIMER_REG_FREQ
:
105 asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val
));
110 if (access
== ARCH_TIMER_VIRT_ACCESS
) {
112 case ARCH_TIMER_REG_CTRL
:
113 asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val
));
115 case ARCH_TIMER_REG_TVAL
:
116 asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val
));
124 static inline u64
arch_counter_get_cntpct(void)
127 asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval
));
131 static inline u64
arch_counter_get_cntvct(void)
134 asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval
));
138 static irqreturn_t
inline timer_handler(const int access
,
139 struct clock_event_device
*evt
)
142 ctrl
= arch_timer_reg_read(access
, ARCH_TIMER_REG_CTRL
);
143 if (ctrl
& ARCH_TIMER_CTRL_IT_STAT
) {
144 ctrl
|= ARCH_TIMER_CTRL_IT_MASK
;
145 arch_timer_reg_write(access
, ARCH_TIMER_REG_CTRL
, ctrl
);
146 evt
->event_handler(evt
);
153 static irqreturn_t
arch_timer_handler_virt(int irq
, void *dev_id
)
155 struct clock_event_device
*evt
= *(struct clock_event_device
**)dev_id
;
157 return timer_handler(ARCH_TIMER_VIRT_ACCESS
, evt
);
160 static irqreturn_t
arch_timer_handler_phys(int irq
, void *dev_id
)
162 struct clock_event_device
*evt
= *(struct clock_event_device
**)dev_id
;
164 return timer_handler(ARCH_TIMER_PHYS_ACCESS
, evt
);
167 static inline void timer_set_mode(const int access
, int mode
)
171 case CLOCK_EVT_MODE_UNUSED
:
172 case CLOCK_EVT_MODE_SHUTDOWN
:
173 ctrl
= arch_timer_reg_read(access
, ARCH_TIMER_REG_CTRL
);
174 ctrl
&= ~ARCH_TIMER_CTRL_ENABLE
;
175 arch_timer_reg_write(access
, ARCH_TIMER_REG_CTRL
, ctrl
);
182 static void arch_timer_set_mode_virt(enum clock_event_mode mode
,
183 struct clock_event_device
*clk
)
185 timer_set_mode(ARCH_TIMER_VIRT_ACCESS
, mode
);
188 static void arch_timer_set_mode_phys(enum clock_event_mode mode
,
189 struct clock_event_device
*clk
)
191 timer_set_mode(ARCH_TIMER_PHYS_ACCESS
, mode
);
194 static inline void set_next_event(const int access
, unsigned long evt
)
197 ctrl
= arch_timer_reg_read(access
, ARCH_TIMER_REG_CTRL
);
198 ctrl
|= ARCH_TIMER_CTRL_ENABLE
;
199 ctrl
&= ~ARCH_TIMER_CTRL_IT_MASK
;
200 arch_timer_reg_write(access
, ARCH_TIMER_REG_TVAL
, evt
);
201 arch_timer_reg_write(access
, ARCH_TIMER_REG_CTRL
, ctrl
);
204 static int arch_timer_set_next_event_virt(unsigned long evt
,
205 struct clock_event_device
*unused
)
207 set_next_event(ARCH_TIMER_VIRT_ACCESS
, evt
);
211 static int arch_timer_set_next_event_phys(unsigned long evt
,
212 struct clock_event_device
*unused
)
214 set_next_event(ARCH_TIMER_PHYS_ACCESS
, evt
);
218 static int __cpuinit
arch_timer_setup(struct clock_event_device
*clk
)
220 clk
->features
= CLOCK_EVT_FEAT_ONESHOT
| CLOCK_EVT_FEAT_C3STOP
;
221 clk
->name
= "arch_sys_timer";
223 if (arch_timer_use_virtual
) {
224 clk
->irq
= arch_timer_ppi
[VIRT_PPI
];
225 clk
->set_mode
= arch_timer_set_mode_virt
;
226 clk
->set_next_event
= arch_timer_set_next_event_virt
;
228 clk
->irq
= arch_timer_ppi
[PHYS_SECURE_PPI
];
229 clk
->set_mode
= arch_timer_set_mode_phys
;
230 clk
->set_next_event
= arch_timer_set_next_event_phys
;
233 clk
->set_mode(CLOCK_EVT_MODE_SHUTDOWN
, NULL
);
235 clockevents_config_and_register(clk
, arch_timer_rate
,
238 *__this_cpu_ptr(arch_timer_evt
) = clk
;
240 if (arch_timer_use_virtual
)
241 enable_percpu_irq(arch_timer_ppi
[VIRT_PPI
], 0);
243 enable_percpu_irq(arch_timer_ppi
[PHYS_SECURE_PPI
], 0);
244 if (arch_timer_ppi
[PHYS_NONSECURE_PPI
])
245 enable_percpu_irq(arch_timer_ppi
[PHYS_NONSECURE_PPI
], 0);
251 static int arch_timer_available(void)
255 if (arch_timer_rate
== 0) {
256 freq
= arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS
,
257 ARCH_TIMER_REG_FREQ
);
259 /* Check the timer frequency. */
261 pr_warn("Architected timer frequency not available\n");
265 arch_timer_rate
= freq
;
268 pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
269 (unsigned long)arch_timer_rate
/ 1000000,
270 (unsigned long)(arch_timer_rate
/ 10000) % 100,
271 arch_timer_use_virtual
? "virt" : "phys");
/*
 * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
 * call it before it has been initialised. Rather than incur a performance
 * penalty checking for initialisation, provide a default implementation that
 * won't lead to time appearing to jump backwards.
 */
281 static u64
arch_timer_read_zero(void)
286 u64 (*arch_timer_read_counter
)(void) = arch_timer_read_zero
;
288 static u32
arch_timer_read_counter32(void)
290 return arch_timer_read_counter();
293 static cycle_t
arch_counter_read(struct clocksource
*cs
)
295 return arch_timer_read_counter();
/* delay_timer read callback for the timer-based delay loop. */
static unsigned long arch_timer_read_current_timer(void)
{
	return arch_timer_read_counter();
}
303 static cycle_t
arch_counter_read_cc(const struct cyclecounter
*cc
)
305 return arch_timer_read_counter();
308 static struct clocksource clocksource_counter
= {
309 .name
= "arch_sys_counter",
311 .read
= arch_counter_read
,
312 .mask
= CLOCKSOURCE_MASK(56),
313 .flags
= CLOCK_SOURCE_IS_CONTINUOUS
,
316 static struct cyclecounter cyclecounter
= {
317 .read
= arch_counter_read_cc
,
318 .mask
= CLOCKSOURCE_MASK(56),
321 static struct timecounter timecounter
;
323 struct timecounter
*arch_timer_get_timecounter(void)
328 static void __cpuinit
arch_timer_stop(struct clock_event_device
*clk
)
330 pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
331 clk
->irq
, smp_processor_id());
333 if (arch_timer_use_virtual
)
334 disable_percpu_irq(arch_timer_ppi
[VIRT_PPI
]);
336 disable_percpu_irq(arch_timer_ppi
[PHYS_SECURE_PPI
]);
337 if (arch_timer_ppi
[PHYS_NONSECURE_PPI
])
338 disable_percpu_irq(arch_timer_ppi
[PHYS_NONSECURE_PPI
]);
341 clk
->set_mode(CLOCK_EVT_MODE_UNUSED
, clk
);
344 static struct local_timer_ops arch_timer_ops __cpuinitdata
= {
345 .setup
= arch_timer_setup
,
346 .stop
= arch_timer_stop
,
349 static struct clock_event_device arch_timer_global_evt
;
351 static int __init
arch_timer_register(void)
356 err
= arch_timer_available();
360 arch_timer_evt
= alloc_percpu(struct clock_event_device
*);
361 if (!arch_timer_evt
) {
366 clocksource_register_hz(&clocksource_counter
, arch_timer_rate
);
367 cyclecounter
.mult
= clocksource_counter
.mult
;
368 cyclecounter
.shift
= clocksource_counter
.shift
;
369 timecounter_init(&timecounter
, &cyclecounter
,
370 arch_counter_get_cntpct());
372 if (arch_timer_use_virtual
) {
373 ppi
= arch_timer_ppi
[VIRT_PPI
];
374 err
= request_percpu_irq(ppi
, arch_timer_handler_virt
,
375 "arch_timer", arch_timer_evt
);
377 ppi
= arch_timer_ppi
[PHYS_SECURE_PPI
];
378 err
= request_percpu_irq(ppi
, arch_timer_handler_phys
,
379 "arch_timer", arch_timer_evt
);
380 if (!err
&& arch_timer_ppi
[PHYS_NONSECURE_PPI
]) {
381 ppi
= arch_timer_ppi
[PHYS_NONSECURE_PPI
];
382 err
= request_percpu_irq(ppi
, arch_timer_handler_phys
,
383 "arch_timer", arch_timer_evt
);
385 free_percpu_irq(arch_timer_ppi
[PHYS_SECURE_PPI
],
391 pr_err("arch_timer: can't register interrupt %d (%d)\n",
396 err
= local_timer_register(&arch_timer_ops
);
399 * We couldn't register as a local timer (could be
400 * because we're on a UP platform, or because some
401 * other local timer is already present...). Try as a
402 * global timer instead.
404 arch_timer_global_evt
.cpumask
= cpumask_of(0);
405 err
= arch_timer_setup(&arch_timer_global_evt
);
410 /* Use the architected timer for the delay loop. */
411 arch_delay_timer
.read_current_timer
= &arch_timer_read_current_timer
;
412 arch_delay_timer
.freq
= arch_timer_rate
;
413 register_current_timer_delay(&arch_delay_timer
);
417 if (arch_timer_use_virtual
)
418 free_percpu_irq(arch_timer_ppi
[VIRT_PPI
], arch_timer_evt
);
420 free_percpu_irq(arch_timer_ppi
[PHYS_SECURE_PPI
],
422 if (arch_timer_ppi
[PHYS_NONSECURE_PPI
])
423 free_percpu_irq(arch_timer_ppi
[PHYS_NONSECURE_PPI
],
428 free_percpu(arch_timer_evt
);
433 static const struct of_device_id arch_timer_of_match
[] __initconst
= {
434 { .compatible
= "arm,armv7-timer", },
438 int __init
arch_timer_of_register(void)
440 struct device_node
*np
;
444 np
= of_find_matching_node(NULL
, arch_timer_of_match
);
446 pr_err("arch_timer: can't find DT node\n");
450 /* Try to determine the frequency from the device tree or CNTFRQ */
451 if (!of_property_read_u32(np
, "clock-frequency", &freq
))
452 arch_timer_rate
= freq
;
454 for (i
= PHYS_SECURE_PPI
; i
< MAX_TIMER_PPI
; i
++)
455 arch_timer_ppi
[i
] = irq_of_parse_and_map(np
, i
);
460 * If no interrupt provided for virtual timer, we'll have to
461 * stick to the physical timer. It'd better be accessible...
463 if (!arch_timer_ppi
[VIRT_PPI
]) {
464 arch_timer_use_virtual
= false;
466 if (!arch_timer_ppi
[PHYS_SECURE_PPI
] ||
467 !arch_timer_ppi
[PHYS_NONSECURE_PPI
]) {
468 pr_warn("arch_timer: No interrupt available, giving up\n");
473 if (arch_timer_use_virtual
)
474 arch_timer_read_counter
= arch_counter_get_cntvct
;
476 arch_timer_read_counter
= arch_counter_get_cntpct
;
478 return arch_timer_register();
481 int __init
arch_timer_sched_clock_init(void)
485 err
= arch_timer_available();
489 setup_sched_clock(arch_timer_read_counter32
,
490 32, arch_timer_rate
);