Commit | Line | Data |
---|---|---|
8b5f79f9 | 1 | /* |
8b5f79f9 VM |
2 | * Based on arm clockevents implementation and old bfin time tick. |
3 | * | |
96f1050d RG |
4 | * Copyright 2008-2009 Analog Devices Inc. |
5 | * 2008 GeoTechnologies | |
6 | * Vitja Makarov | |
8b5f79f9 | 7 | * |
96f1050d | 8 | * Licensed under the GPL-2 |
8b5f79f9 | 9 | */ |
96f1050d | 10 | |
8b5f79f9 VM |
11 | #include <linux/module.h> |
12 | #include <linux/profile.h> | |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/time.h> | |
764cb81c | 15 | #include <linux/timex.h> |
8b5f79f9 VM |
16 | #include <linux/irq.h> |
17 | #include <linux/clocksource.h> | |
18 | #include <linux/clockchips.h> | |
e6c91b64 | 19 | #include <linux/cpufreq.h> |
8b5f79f9 VM |
20 | |
21 | #include <asm/blackfin.h> | |
e6c91b64 | 22 | #include <asm/time.h> |
1fa9be72 | 23 | #include <asm/gptimers.h> |
8b5f79f9 | 24 | |
e6c91b64 MH |
25 | /* Accelerators for sched_clock() |
26 | * convert from cycles(64bits) => nanoseconds (64bits) | |
27 | * basic equation: | |
28 | * ns = cycles / (freq / ns_per_sec) | |
29 | * ns = cycles * (ns_per_sec / freq) | |
30 | * ns = cycles * (10^9 / (cpu_khz * 10^3)) | |
31 | * ns = cycles * (10^6 / cpu_khz) | |
32 | * | |
33 | * Then we use scaling math (suggested by george@mvista.com) to get: | |
34 | * ns = cycles * (10^6 * SC / cpu_khz) / SC | |
35 | * ns = cycles * cyc2ns_scale / SC | |
36 | * | |
37 | * And since SC is a constant power of two, we can convert the div | |
38 | * into a shift. | |
39 | * | |
40 | * We can use khz divisor instead of mhz to keep a better precision, since | |
41 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | |
42 | * (mathieu.desnoyers@polymtl.ca) | |
43 | * | |
44 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | |
45 | */ | |
46 | ||
8b5f79f9 VM |
47 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ |
48 | ||
ceb33be9 YL |
49 | #if defined(CONFIG_CYCLES_CLOCKSOURCE) |
50 | ||
ceb33be9 | 51 | static notrace cycle_t bfin_read_cycles(struct clocksource *cs) |
8b5f79f9 | 52 | { |
1bfb4b21 | 53 | return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); |
8b5f79f9 VM |
54 | } |
55 | ||
1fa9be72 GY |
56 | static struct clocksource bfin_cs_cycles = { |
57 | .name = "bfin_cs_cycles", | |
e78feaae | 58 | .rating = 400, |
1fa9be72 | 59 | .read = bfin_read_cycles, |
8b5f79f9 | 60 | .mask = CLOCKSOURCE_MASK(64), |
29857124 | 61 | .shift = CYC2NS_SCALE_FACTOR, |
8b5f79f9 VM |
62 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
63 | }; | |
64 | ||
ceb33be9 | 65 | static inline unsigned long long bfin_cs_cycles_sched_clock(void) |
8e19608e | 66 | { |
29857124 | 67 | return cyc2ns(&bfin_cs_cycles, bfin_read_cycles(&bfin_cs_cycles)); |
8e19608e MD |
68 | } |
69 | ||
1fa9be72 | 70 | static int __init bfin_cs_cycles_init(void) |
8b5f79f9 | 71 | { |
1fa9be72 GY |
72 | bfin_cs_cycles.mult = \ |
73 | clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift); | |
8b5f79f9 | 74 | |
1fa9be72 | 75 | if (clocksource_register(&bfin_cs_cycles)) |
8b5f79f9 VM |
76 | panic("failed to register clocksource"); |
77 | ||
78 | return 0; | |
79 | } | |
1fa9be72 GY |
80 | #else |
81 | # define bfin_cs_cycles_init() | |
82 | #endif | |
83 | ||
84 | #ifdef CONFIG_GPTMR0_CLOCKSOURCE | |
85 | ||
/*
 * Configure general-purpose timer 0 as a free-running counter for use
 * as a clocksource: PWM mode with the output pin disabled, continuous
 * (period-count) operation, and the maximum 32-bit period so it wraps
 * as late as possible.
 */
void __init setup_gptimer0(void)
{
	/* make sure the timer is stopped before reprogramming it */
	disable_gptimers(TIMER0bit);

	set_gptimer_config(TIMER0_id, \
		TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
	set_gptimer_period(TIMER0_id, -1);	/* -1 == 0xffffffff: max period */
	set_gptimer_pwidth(TIMER0_id, -2);	/* presumably must stay below period — confirm vs HRM */
	SSYNC();	/* flush MMIO writes before enabling the timer */
	enable_gptimers(TIMER0bit);
}
97 | ||
f7036d64 | 98 | static cycle_t bfin_read_gptimer0(struct clocksource *cs) |
1fa9be72 GY |
99 | { |
100 | return bfin_read_TIMER0_COUNTER(); | |
101 | } | |
102 | ||
/*
 * Clocksource backed by GP timer 0 (clocked from SCLK).  Rated below
 * bfin_cs_cycles so the cycle counter wins when both are built in.
 * Only a 32-bit counter, hence the narrower mask.
 */
static struct clocksource bfin_cs_gptimer0 = {
	.name		= "bfin_cs_gptimer0",
	.rating		= 350,
	.read		= bfin_read_gptimer0,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= CYC2NS_SCALE_FACTOR,	/* see cyc2ns scaling math above */
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
111 | ||
ceb33be9 YL |
112 | static inline unsigned long long bfin_cs_gptimer0_sched_clock(void) |
113 | { | |
29857124 | 114 | return cyc2ns(&bfin_cs_gptimer0, bfin_read_TIMER0_COUNTER()); |
ceb33be9 YL |
115 | } |
116 | ||
1fa9be72 GY |
117 | static int __init bfin_cs_gptimer0_init(void) |
118 | { | |
119 | setup_gptimer0(); | |
8b5f79f9 | 120 | |
1fa9be72 GY |
121 | bfin_cs_gptimer0.mult = \ |
122 | clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift); | |
123 | ||
124 | if (clocksource_register(&bfin_cs_gptimer0)) | |
125 | panic("failed to register clocksource"); | |
126 | ||
127 | return 0; | |
128 | } | |
8b5f79f9 | 129 | #else |
1fa9be72 | 130 | # define bfin_cs_gptimer0_init() |
8b5f79f9 VM |
131 | #endif |
132 | ||
ceb33be9 YL |
133 | |
134 | #if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE) | |
135 | /* prefer to use cycles since it has higher rating */ | |
notrace unsigned long long sched_clock(void)
{
#if defined(CONFIG_CYCLES_CLOCKSOURCE)
	/* cycles clocksource (rating 400) beats gptimer0 (rating 350) */
	return bfin_cs_cycles_sched_clock();
#else
	return bfin_cs_gptimer0_sched_clock();
#endif
}
144 | #endif | |
145 | ||
1fa9be72 GY |
146 | #ifdef CONFIG_CORE_TIMER_IRQ_L1 |
147 | __attribute__((l1_text)) | |
148 | #endif | |
149 | irqreturn_t timer_interrupt(int irq, void *dev_id); | |
150 | ||
151 | static int bfin_timer_set_next_event(unsigned long, \ | |
152 | struct clock_event_device *); | |
153 | ||
154 | static void bfin_timer_set_mode(enum clock_event_mode, \ | |
155 | struct clock_event_device *); | |
156 | ||
/*
 * The single clock event device: either GP timer 0 or the core timer,
 * selected at build time.  mult is filled in by bfin_clockevent_init()
 * once the timer input frequency is known.
 */
static struct clock_event_device clockevent_bfin = {
#if defined(CONFIG_TICKSOURCE_GPTMR0)
	.name		= "bfin_gptimer0",
	.rating		= 300,
	.irq		= IRQ_TIMER0,
#else
	.name		= "bfin_core_timer",
	.rating		= 350,
	.irq		= IRQ_CORETMR,
#endif
	.shift		= 32,	/* fixed-point shift for the mult factor */
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event = bfin_timer_set_next_event,
	.set_mode	= bfin_timer_set_mode,
};
172 | ||
/*
 * IRQ action for the tick interrupt; dev_id carries the clock event
 * device so timer_interrupt() can dispatch to its event handler.
 */
static struct irqaction bfin_timer_irq = {
#if defined(CONFIG_TICKSOURCE_GPTMR0)
	.name		= "Blackfin GPTimer0",
#else
	.name		= "Blackfin CoreTimer",
#endif
	.flags		= IRQF_DISABLED | IRQF_TIMER | \
			  IRQF_IRQPOLL | IRQF_PERCPU,
	.handler	= timer_interrupt,
	.dev_id		= &clockevent_bfin,
};
184 | ||
185 | #if defined(CONFIG_TICKSOURCE_GPTMR0) | |
8b5f79f9 VM |
/*
 * Program a one-shot event 'cycles' SCLK ticks in the future on
 * GP timer 0.  Returns 0 (cannot fail).
 */
static int bfin_timer_set_next_event(unsigned long cycles,
		struct clock_event_device *evt)
{
	disable_gptimers(TIMER0bit);

	/* it starts counting three SCLK cycles after the TIMENx bit is set */
	set_gptimer_pwidth(TIMER0_id, cycles - 3);
	enable_gptimers(TIMER0bit);
	return 0;
}
196 | ||
/*
 * Switch GP timer 0 between clock event modes.  In periodic mode the
 * timer auto-reloads every HZ-th of a second; in one-shot mode it is
 * left idle until bfin_timer_set_next_event() programs a width.
 */
static void bfin_timer_set_mode(enum clock_event_mode mode,
		struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		/* continuous PWM mode, IRQ on each period expiry */
		set_gptimer_config(TIMER0_id, \
			TIMER_OUT_DIS | TIMER_IRQ_ENA | \
			TIMER_PERIOD_CNT | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, get_sclk() / HZ);
		set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
		enable_gptimers(TIMER0bit);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		/* no TIMER_PERIOD_CNT: fire once per programmed width */
		disable_gptimers(TIMER0bit);
		set_gptimer_config(TIMER0_id, \
			TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
		set_gptimer_period(TIMER0_id, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		disable_gptimers(TIMER0bit);
		break;
	case CLOCK_EVT_MODE_RESUME:
		/* nothing to restore; next event reprograms the timer */
		break;
	}
}
224 | ||
/* Acknowledge the tick: clear TIMER0's latched interrupt status. */
static void bfin_timer_ack(void)
{
	set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
}
229 | ||
/* Leave GP timer 0 stopped; set_mode/set_next_event will start it. */
static void __init bfin_timer_init(void)
{
	disable_gptimers(TIMER0bit);
}
234 | ||
/*
 * Hook up the tick IRQ and report the clock event input frequency:
 * GP timers are clocked from SCLK.
 */
static unsigned long __init bfin_clockevent_check(void)
{
	setup_irq(IRQ_TIMER0, &bfin_timer_irq);
	return get_sclk();
}
240 | ||
241 | #else /* CONFIG_TICKSOURCE_CORETMR */ | |
242 | ||
/*
 * Program a one-shot event 'cycles' (scaled core clock) ticks in the
 * future on the core timer: power it, load TCOUNT, then enable.  Each
 * MMIO write is followed by CSYNC() to order it before the next step.
 * Returns 0 (cannot fail).
 */
static int bfin_timer_set_next_event(unsigned long cycles,
		struct clock_event_device *evt)
{
	bfin_write_TCNTL(TMPWR);
	CSYNC();
	bfin_write_TCOUNT(cycles);
	CSYNC();
	bfin_write_TCNTL(TMPWR | TMREN);
	return 0;
}
253 | ||
/*
 * Switch the core timer between clock event modes.  Periodic mode uses
 * auto-reload (TAUTORLD) at HZ; one-shot mode powers the timer but
 * leaves TCOUNT zero until bfin_timer_set_next_event() loads it.
 */
static void bfin_timer_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC: {
		/* reload value for one tick at HZ, after TSCALE prescaling */
		unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
		bfin_write_TCNTL(TMPWR);
		CSYNC();	/* order TCNTL write before further setup */
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(tcount);
		bfin_write_TCOUNT(tcount);
		CSYNC();
		bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
		break;
	}
	case CLOCK_EVT_MODE_ONESHOT:
		bfin_write_TCNTL(TMPWR);
		CSYNC();
		bfin_write_TSCALE(TIME_SCALE - 1);
		bfin_write_TPERIOD(0);	/* no auto-reload period */
		bfin_write_TCOUNT(0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		bfin_write_TCNTL(0);	/* power the timer down entirely */
		CSYNC();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}
285 | ||
1fa9be72 GY |
/* No explicit acknowledge needed for the core timer interrupt. */
static void bfin_timer_ack(void)
{
}
289 | ||
8b5f79f9 VM |
/*
 * Bring the core timer to a known powered-but-stopped state:
 * prescaler programmed, period and count cleared, TMREN not set.
 */
static void __init bfin_timer_init(void)
{
	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(TMPWR);
	CSYNC();

	/*
	 * the TSCALE prescaler counter.
	 */
	bfin_write_TSCALE(TIME_SCALE - 1);
	bfin_write_TPERIOD(0);
	bfin_write_TCOUNT(0);

	CSYNC();
}
305 | ||
1fa9be72 GY |
/*
 * Hook up the tick IRQ and report the clock event input frequency:
 * the core timer runs at CCLK divided by the TSCALE prescaler.
 */
static unsigned long __init bfin_clockevent_check(void)
{
	setup_irq(IRQ_CORETMR, &bfin_timer_irq);
	return get_cclk() / TIME_SCALE;
}
311 | ||
/* Initialize the core timer and start it ticking periodically at HZ. */
void __init setup_core_timer(void)
{
	bfin_timer_init();
	bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL);
}
317 | #endif /* CONFIG_TICKSOURCE_GPTMR0 */ | |
318 | ||
8b5f79f9 VM |
319 | /* |
320 | * timer_interrupt() needs to keep up the real-time clock, | |
321 | * as well as call the "do_timer()" routine every clocktick | |
322 | */ | |
8b5f79f9 VM |
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;
	/* NOTE(review): full barrier before dispatching the event handler;
	 * the pairing store is not visible in this file — confirm intent. */
	smp_mb();
	evt->event_handler(evt);
	bfin_timer_ack();	/* clear latched status (no-op for core timer) */
	return IRQ_HANDLED;
}
331 | ||
332 | static int __init bfin_clockevent_init(void) | |
333 | { | |
1bfb4b21 VM |
334 | unsigned long timer_clk; |
335 | ||
1fa9be72 | 336 | timer_clk = bfin_clockevent_check(); |
1bfb4b21 | 337 | |
8b5f79f9 VM |
338 | bfin_timer_init(); |
339 | ||
1bfb4b21 | 340 | clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); |
8b5f79f9 VM |
341 | clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin); |
342 | clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin); | |
320ab2b0 | 343 | clockevent_bfin.cpumask = cpumask_of(0); |
8b5f79f9 VM |
344 | clockevents_register_device(&clockevent_bfin); |
345 | ||
346 | return 0; | |
347 | } | |
348 | ||
/*
 * Arch time init: seed xtime with a fixed epoch, then bring up the
 * clocksources and the clock event device.  Called once at boot.
 */
void __init time_init(void)
{
	/* 37 years plus 9 leap days after 1 Jan 1970 */
	time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */

#ifdef CONFIG_RTC_DRV_BFIN
	/* [#2663] hack to filter junk RTC values that would cause
	 * userspace to have to deal with time values greater than
	 * 2^31 seconds (which uClibc cannot cope with yet)
	 */
	if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
		printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
		bfin_write_RTC_STAT(0);
	}
#endif

	/* Initialize xtime. From now on, xtime is updated with timer interrupts */
	xtime.tv_sec = secs_since_1970;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);

	/* each *_init() is a no-op macro when its Kconfig option is off */
	bfin_cs_cycles_init();
	bfin_cs_gptimer0_init();
	bfin_clockevent_init();
}