drivers/clocksource/timer-atlas7.c
/*
 * System timer for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/sched_clock.h>

#define SIRFSOC_TIMER_32COUNTER_0_CTRL		0x0000
#define SIRFSOC_TIMER_32COUNTER_1_CTRL		0x0004
#define SIRFSOC_TIMER_MATCH_0			0x0018
#define SIRFSOC_TIMER_MATCH_1			0x001c
#define SIRFSOC_TIMER_COUNTER_0			0x0048
#define SIRFSOC_TIMER_COUNTER_1			0x004c
#define SIRFSOC_TIMER_INTR_STATUS		0x0060
#define SIRFSOC_TIMER_WATCHDOG_EN		0x0064
#define SIRFSOC_TIMER_64COUNTER_CTRL		0x0068
#define SIRFSOC_TIMER_64COUNTER_LO		0x006c
#define SIRFSOC_TIMER_64COUNTER_HI		0x0070
#define SIRFSOC_TIMER_64COUNTER_LOAD_LO		0x0074
#define SIRFSOC_TIMER_64COUNTER_LOAD_HI		0x0078
#define SIRFSOC_TIMER_64COUNTER_RLATCHED_LO	0x007c
#define SIRFSOC_TIMER_64COUNTER_RLATCHED_HI	0x0080

#define SIRFSOC_TIMER_REG_CNT			6

static unsigned long atlas7_timer_rate;

static const u32 sirfsoc_timer_reg_list[SIRFSOC_TIMER_REG_CNT] = {
	SIRFSOC_TIMER_WATCHDOG_EN,
	SIRFSOC_TIMER_32COUNTER_0_CTRL,
	SIRFSOC_TIMER_32COUNTER_1_CTRL,
	SIRFSOC_TIMER_64COUNTER_CTRL,
	SIRFSOC_TIMER_64COUNTER_RLATCHED_LO,
	SIRFSOC_TIMER_64COUNTER_RLATCHED_HI,
};

static u32 sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT];

static void __iomem *sirfsoc_timer_base;

/* disable count and interrupt */
static inline void sirfsoc_timer_count_disable(int idx)
{
	writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx) & ~0x7,
		       sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx);
}

/* enable count and interrupt */
static inline void sirfsoc_timer_count_enable(int idx)
{
	writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx) | 0x3,
		       sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL + 4 * idx);
}

/* timer interrupt handler */
static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *ce = dev_id;
	int cpu = smp_processor_id();

	/* clear timer interrupt */
	writel_relaxed(BIT(cpu), sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);

	if (ce->mode == CLOCK_EVT_MODE_ONESHOT)
		sirfsoc_timer_count_disable(cpu);

	ce->event_handler(ce);

	return IRQ_HANDLED;
}

/* read 64-bit timer counter */
static cycle_t sirfsoc_timer_read(struct clocksource *cs)
{
	u64 cycles;

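	/* latch the running 64-bit count into the RLATCHED registers so both halves are read consistently */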
	writel_relaxed((readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) |
			BIT(0)) & ~BIT(1), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);

	cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_RLATCHED_HI);
	cycles = (cycles << 32) | readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_RLATCHED_LO);

	return cycles;
}

static int sirfsoc_timer_set_next_event(unsigned long delta,
	struct clock_event_device *ce)
{
	int cpu = smp_processor_id();

	/* disable timer first, then modify the related registers */
	sirfsoc_timer_count_disable(cpu);

	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_0 +
		4 * cpu);
	writel_relaxed(delta, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0 +
		4 * cpu);

	/* enable the tick */
	sirfsoc_timer_count_enable(cpu);

	return 0;
}

static void sirfsoc_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *ce)
{
	switch (mode) {
	case CLOCK_EVT_MODE_ONESHOT:
		/* enable in set_next_event */
		break;
	default:
		break;
	}

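	/* leave the counter stopped; set_next_event() re-enables it when the next tick is armed */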
	sirfsoc_timer_count_disable(smp_processor_id());
}

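/*
 * The registers in sirfsoc_timer_reg_list are saved on suspend. On resume
 * the first four are written back directly, while the latched 64-bit count
 * (the last two entries) is restored through the LOAD_LO/LOAD_HI registers.
 */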
static void sirfsoc_clocksource_suspend(struct clocksource *cs)
{
	int i;

	for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++)
		sirfsoc_timer_reg_val[i] = readl_relaxed(sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);
}

static void sirfsoc_clocksource_resume(struct clocksource *cs)
{
	int i;

	for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++)
		writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]);

	writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2],
		       sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_LO);
	writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1],
		       sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_HI);

	writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) |
		       BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
}

static struct clock_event_device __percpu *sirfsoc_clockevent;

static struct clocksource sirfsoc_clocksource = {
	.name = "sirfsoc_clocksource",
	.rating = 200,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.read = sirfsoc_timer_read,
	.suspend = sirfsoc_clocksource_suspend,
	.resume = sirfsoc_clocksource_resume,
};

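/* per-cpu tick interrupts: CPU0 uses timer0, non-boot CPUs use timer1 (see sirfsoc_local_timer_setup()) */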
static struct irqaction sirfsoc_timer_irq = {
	.name = "sirfsoc_timer0",
	.flags = IRQF_TIMER | IRQF_NOBALANCING,
	.handler = sirfsoc_timer_interrupt,
};

static struct irqaction sirfsoc_timer1_irq = {
	.name = "sirfsoc_timer1",
	.flags = IRQF_TIMER | IRQF_NOBALANCING,
	.handler = sirfsoc_timer_interrupt,
};

static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
{
	int cpu = smp_processor_id();
	struct irqaction *action;

	if (cpu == 0)
		action = &sirfsoc_timer_irq;
	else
		action = &sirfsoc_timer1_irq;

	ce->irq = action->irq;
	ce->name = "local_timer";
	ce->features = CLOCK_EVT_FEAT_ONESHOT;
	ce->rating = 200;
	ce->set_mode = sirfsoc_timer_set_mode;
	ce->set_next_event = sirfsoc_timer_set_next_event;
	clockevents_calc_mult_shift(ce, atlas7_timer_rate, 60);
	ce->max_delta_ns = clockevent_delta2ns(-2, ce);
	ce->min_delta_ns = clockevent_delta2ns(2, ce);
	ce->cpumask = cpumask_of(cpu);

	action->dev_id = ce;
	BUG_ON(setup_irq(ce->irq, action));
	irq_force_affinity(action->irq, cpumask_of(cpu));

	clockevents_register_device(ce);
	return 0;
}

static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
{
	int cpu = smp_processor_id();

	sirfsoc_timer_count_disable(1);

	if (cpu == 0)
		remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
	else
		remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
}

static int sirfsoc_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	/*
	 * Grab cpu pointer in each case to avoid spurious
	 * preemptible warnings
	 */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
		break;
	case CPU_DYING:
		sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent));
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block sirfsoc_cpu_nb = {
	.notifier_call = sirfsoc_cpu_notify,
};

static void __init sirfsoc_clockevent_init(void)
{
	sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
	BUG_ON(!sirfsoc_clockevent);

	BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));

	/* Immediately configure the timer on the boot CPU */
	sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
}

/* initialize the kernel jiffy timer source */
static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
{
	struct clk *clk;

	clk = of_clk_get(np, 0);
	BUG_ON(IS_ERR(clk));

	BUG_ON(clk_prepare_enable(clk));

	atlas7_timer_rate = clk_get_rate(clk);

	/* timer dividers: 0, not divided */
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_0_CTRL);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_32COUNTER_1_CTRL);

	/* Initialize timer counters to 0 */
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_LO);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_LOAD_HI);
	writel_relaxed(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL) |
		       BIT(1) | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_64COUNTER_CTRL);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_0);
	writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_1);

	/* Clear all interrupts */
	writel_relaxed(0xFFFF, sirfsoc_timer_base + SIRFSOC_TIMER_INTR_STATUS);

	BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate));

	sirfsoc_clockevent_init();
}

static void __init sirfsoc_of_timer_init(struct device_node *np)
{
	sirfsoc_timer_base = of_iomap(np, 0);
	if (!sirfsoc_timer_base)
		panic("unable to map timer cpu registers\n");

	sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
	if (!sirfsoc_timer_irq.irq)
		panic("No irq passed for timer0 via DT\n");

	sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
	if (!sirfsoc_timer1_irq.irq)
		panic("No irq passed for timer1 via DT\n");

	sirfsoc_atlas7_timer_init(np);
}
CLOCKSOURCE_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);