ARM: iop: convert sched_clock() to use new infrastructure
[deliverable/linux.git] / arch / arm / plat-nomadik / timer.c
CommitLineData
28ad94ec 1/*
a0719f52 2 * linux/arch/arm/plat-nomadik/timer.c
28ad94ec
AR
3 *
4 * Copyright (C) 2008 STMicroelectronics
b102c01f 5 * Copyright (C) 2010 Alessandro Rubini
8fbb97a2 6 * Copyright (C) 2010 Linus Walleij for ST-Ericsson
28ad94ec
AR
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2, as
10 * published by the Free Software Foundation.
11 */
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <linux/irq.h>
15#include <linux/io.h>
16#include <linux/clockchips.h>
ba327b1e 17#include <linux/clk.h>
28ad94ec 18#include <linux/jiffies.h>
ba327b1e 19#include <linux/err.h>
8fbb97a2
LW
20#include <linux/cnt32_to_63.h>
21#include <linux/timer.h>
5e06b649 22#include <linux/sched.h>
28ad94ec 23#include <asm/mach/time.h>
28ad94ec 24
59b559d7 25#include <plat/mtu.h>
28ad94ec 26
8fbb97a2 27void __iomem *mtu_base; /* Assigned by machine code */
59b559d7 28
2a847513
LW
29/*
30 * Kernel assumes that sched_clock can be called early
31 * but the MTU may not yet be initialized.
32 */
/* Placeholder read callback: returns 0 until the MTU is mapped and started. */
static cycle_t nmdk_read_timer_dummy(struct clocksource *cs)
{
	return 0;
}
37
b102c01f 38/* clocksource: MTU decrements, so we negate the value being read. */
28ad94ec
AR
39static cycle_t nmdk_read_timer(struct clocksource *cs)
40{
b102c01f 41 return -readl(mtu_base + MTU_VAL(0));
28ad94ec
AR
42}
43
/*
 * Clocksource backed by MTU timer 0.  Starts with the dummy read callback;
 * nmdk_timer_init() swaps in nmdk_read_timer once the hardware is running.
 */
static struct clocksource nmdk_clksrc = {
	.name		= "mtu_0",
	.rating		= 200,
	.read		= nmdk_read_timer_dummy,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
51
2a847513
LW
52/*
53 * Override the global weak sched_clock symbol with this
54 * local implementation which uses the clocksource to get some
8fbb97a2
LW
55 * better resolution when scheduling the kernel.
56 *
57 * Because the hardware timer period may be quite short
58 * (32.3 secs on the 133 MHz MTU timer selection on ux500)
59 * and because cnt32_to_63() needs to be called at least once per
60 * half period to work properly, a kernel keepwarm() timer is set up
61 * to ensure this requirement is always met.
62 *
63 * Also the sched_clock timer will wrap around at some point,
 * here we set it to run continuously for a year.
2a847513 65 */
8fbb97a2
LW
66#define SCHED_CLOCK_MIN_WRAP 3600*24*365
67static struct timer_list cnt32_to_63_keepwarm_timer;
68static u32 sched_mult;
69static u32 sched_shift;
70
2a847513
LW
unsigned long long notrace sched_clock(void)
{
	u64 cycles;

	/* May be called early, before nmdk_timer_init() has mapped the MTU. */
	if (unlikely(!mtu_base))
		return 0;

	/*
	 * Negate the down-counting 32 bit MTU value and extend it to 63
	 * bits; cnt32_to_63() relies on being called at least once per
	 * half wrap (guaranteed by the keepwarm timer below).
	 */
	cycles = cnt32_to_63(-readl(mtu_base + MTU_VAL(0)));
	/*
	 * sched_mult is guaranteed to be even so will
	 * shift out bit 63
	 */
	return (cycles * sched_mult) >> sched_shift;
}
85
86/* Just kick sched_clock every so often */
87static void cnt32_to_63_keepwarm(unsigned long data)
88{
89 mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + data));
90 (void) sched_clock();
91}
92
93/*
94 * Set up a timer to keep sched_clock():s 32_to_63 algorithm warm
95 * once in half a 32bit timer wrap interval.
96 */
97static void __init nmdk_sched_clock_init(unsigned long rate)
98{
99 u32 v;
100 unsigned long delta;
101 u64 days;
102
103 /* Find the apropriate mult and shift factors */
104 clocks_calc_mult_shift(&sched_mult, &sched_shift,
105 rate, NSEC_PER_SEC, SCHED_CLOCK_MIN_WRAP);
106 /* We need to multiply by an even number to get rid of bit 63 */
107 if (sched_mult & 1)
108 sched_mult++;
109
110 /* Let's see what we get, take max counter and scale it */
111 days = (0xFFFFFFFFFFFFFFFFLLU * sched_mult) >> sched_shift;
112 do_div(days, NSEC_PER_SEC);
113 do_div(days, (3600*24));
114
115 pr_info("sched_clock: using %d bits @ %lu Hz wrap in %lu days\n",
116 (64 - sched_shift), rate, (unsigned long) days);
117
118 /*
119 * Program a timer to kick us at half 32bit wraparound
120 * Formula: seconds per wrap = (2^32) / f
121 */
122 v = 0xFFFFFFFFUL / rate;
123 /* We want half of the wrap time to keep cnt32_to_63 warm */
124 v /= 2;
125 pr_debug("sched_clock: prescaled timer rate: %lu Hz, "
126 "initialize keepwarm timer every %d seconds\n", rate, v);
127 /* Convert seconds to jiffies */
128 delta = msecs_to_jiffies(v*1000);
129 setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm, delta);
130 mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + delta));
2a847513
LW
131}
132
/* Clockevent device: use one-shot mode */
static void nmdk_clkevt_mode(enum clock_event_mode mode,
			     struct clock_event_device *dev)
{
	u32 cr;

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* Only ONESHOT is advertised in nmdk_clkevt.features. */
		pr_err("%s: periodic mode not supported\n", __func__);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/*
		 * Load highest value, enable device, enable interrupts.
		 * NOTE(review): the comment says "highest value" but 0 is
		 * written to the load register — presumably the decrementing
		 * counter wraps to 0xffffffff on the next tick; confirm
		 * against the MTU hardware spec.
		 */
		cr = readl(mtu_base + MTU_CR(1));
		writel(0, mtu_base + MTU_LR(1));
		writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(1));
		writel(1 << 1, mtu_base + MTU_IMSC);	/* unmask timer 1 irq */
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_UNUSED:
		/* disable irq */
		writel(0, mtu_base + MTU_IMSC);
		/* disable timer */
		cr = readl(mtu_base + MTU_CR(1));
		cr &= ~MTU_CRn_ENA;
		writel(cr, mtu_base + MTU_CR(1));
		/* load some high default value */
		writel(0xffffffff, mtu_base + MTU_LR(1));
		break;
	case CLOCK_EVT_MODE_RESUME:
		/* Nothing to do: ONESHOT re-enables the timer as needed. */
		break;
	}
}
165
b102c01f
AR
/* Program the next event on MTU timer 1; 'evt' is the delta in timer ticks. */
static int nmdk_clkevt_next(unsigned long evt, struct clock_event_device *ev)
{
	/* writing the value has immediate effect */
	writel(evt, mtu_base + MTU_LR(1));
	return 0;
}
172
/* Clockevent device backed by MTU timer 1, one-shot only. */
static struct clock_event_device nmdk_clkevt = {
	.name		= "mtu_1",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 200,
	.set_mode	= nmdk_clkevt_mode,
	.set_next_event	= nmdk_clkevt_next,
};
180
181/*
b102c01f 182 * IRQ Handler for timer 1 of the MTU block.
28ad94ec
AR
183 */
184static irqreturn_t nmdk_timer_interrupt(int irq, void *dev_id)
185{
b102c01f 186 struct clock_event_device *evdev = dev_id;
28ad94ec 187
b102c01f
AR
188 writel(1 << 1, mtu_base + MTU_ICR); /* Interrupt clear reg */
189 evdev->event_handler(evdev);
28ad94ec
AR
190 return IRQ_HANDLED;
191}
192
28ad94ec
AR
/* Tick interrupt action; dev_id routes the handler to the clockevent. */
static struct irqaction nmdk_timer_irq = {
	.name		= "Nomadik Timer Tick",
	.flags		= IRQF_DISABLED | IRQF_TIMER,
	.handler	= nmdk_timer_interrupt,
	.dev_id		= &nmdk_clkevt,
};
199
/*
 * Board-time init: sets up MTU timer 0 as a free-running clocksource
 * (plus sched_clock) and MTU timer 1 as a one-shot clockevent device.
 * Requires mtu_base to have been assigned by the machine code.
 */
void __init nmdk_timer_init(void)
{
	unsigned long rate;
	struct clk *clk0;
	u32 cr = MTU_CRn_32BITS;

	clk0 = clk_get_sys("mtu0", NULL);
	BUG_ON(IS_ERR(clk0));

	clk_enable(clk0);

	/*
	 * Tick rate is 2.4MHz for Nomadik and 2.4Mhz, 100MHz or 133 MHz
	 * for ux500.
	 * Use a divide-by-16 counter if the tick rate is more than 32MHz.
	 * At 32 MHz, the timer (with 32 bit counter) can be programmed
	 * to wake-up at a max 127s a head in time. Dividing a 2.4 MHz timer
	 * with 16 gives too low timer resolution.
	 */
	rate = clk_get_rate(clk0);
	if (rate > 32000000) {
		rate /= 16;
		cr |= MTU_CRn_PRESCALE_16;
	} else {
		cr |= MTU_CRn_PRESCALE_1;
	}

	/* Timer 0 is the free running clocksource */
	writel(cr, mtu_base + MTU_CR(0));
	writel(0, mtu_base + MTU_LR(0));
	writel(0, mtu_base + MTU_BGLR(0));
	writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));

	/* Now the clock source is ready: swap out the dummy read callback */
	nmdk_clksrc.read = nmdk_read_timer;

	if (clocksource_register_hz(&nmdk_clksrc, rate))
		pr_err("timer: failed to initialize clock source %s\n",
		       nmdk_clksrc.name);

	/* sched_clock shares timer 0 with the clocksource */
	nmdk_sched_clock_init(rate);

	/* Timer 1 is used for events */

	clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);

	writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */

	nmdk_clkevt.max_delta_ns =
		clockevent_delta2ns(0xffffffff, &nmdk_clkevt);
	nmdk_clkevt.min_delta_ns =
		clockevent_delta2ns(0x00000002, &nmdk_clkevt);
	nmdk_clkevt.cpumask = cpumask_of(0);

	/* Register irq and clockevents */
	setup_irq(IRQ_MTU0, &nmdk_timer_irq);
	clockevents_register_device(&nmdk_clkevt);
}
This page took 0.093191 seconds and 5 git commands to generate.