Commit | Line | Data |
---|---|---|
9570ef20 MD |
1 | /* |
2 | * SuperH Timer Support - TMU | |
3 | * | |
4 | * Copyright (C) 2009 Magnus Damm | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, | |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
9570ef20 MD |
14 | */ |
15 | ||
13931f80 LP |
16 | #include <linux/clk.h> |
17 | #include <linux/clockchips.h> | |
18 | #include <linux/clocksource.h> | |
19 | #include <linux/delay.h> | |
20 | #include <linux/err.h> | |
9570ef20 | 21 | #include <linux/init.h> |
9570ef20 | 22 | #include <linux/interrupt.h> |
9570ef20 | 23 | #include <linux/io.h> |
13931f80 | 24 | #include <linux/ioport.h> |
9570ef20 | 25 | #include <linux/irq.h> |
7deeab5d | 26 | #include <linux/module.h> |
13931f80 | 27 | #include <linux/platform_device.h> |
2ee619f9 | 28 | #include <linux/pm_domain.h> |
eaa49a8c | 29 | #include <linux/pm_runtime.h> |
13931f80 LP |
30 | #include <linux/sh_timer.h> |
31 | #include <linux/slab.h> | |
32 | #include <linux/spinlock.h> | |
9570ef20 | 33 | |
8c7f21e6 LP |
34 | enum sh_tmu_model { |
35 | SH_TMU_LEGACY, | |
36 | SH_TMU, | |
37 | SH_TMU_SH3, | |
38 | }; | |
39 | ||
struct sh_tmu_device;

/*
 * Per-channel state.  Each TMU channel is used either as a clock event
 * device or as a clocksource (see sh_tmu_register()).
 */
struct sh_tmu_channel {
	struct sh_tmu_device *tmu;	/* backpointer to the owning device */
	unsigned int index;		/* channel index within the TMU block */

	void __iomem *base;		/* channel register block base */
	int irq;			/* channel interrupt */

	unsigned long rate;		/* counter input rate (Hz), set on enable */
	unsigned long periodic;		/* tick reload value for periodic mode */
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;		/* clocksource currently enabled */
	unsigned int enable_count;	/* nested enable reference count */
};
56 | ||
/* Per-device state: one platform device covering one or more channels. */
struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;		/* ioremapped register range */
	struct clk *clk;		/* functional clock */

	enum sh_tmu_model model;

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	/* Set when at least one channel was registered in the given role. */
	bool has_clockevent;
	bool has_clocksource;
};
71 | ||
/* Protects the TSTR register, which is shared by all channels of a block. */
static DEFINE_RAW_SPINLOCK(sh_tmu_lock);

/*
 * Register identifiers.  Channel registers are addressed as 4-byte slots
 * from the channel base; TSTR is shared and located relative to the device
 * base at a model-dependent offset.
 */
#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

/* TCR bits: underflow flag, underflow interrupt enable, prescaler select. */
#define TCR_UNF (1 << 8)
#define TCR_UNIE (1 << 5)
#define TCR_TPSC_CLK4 (0 << 0)
#define TCR_TPSC_CLK16 (1 << 0)
#define TCR_TPSC_CLK64 (2 << 0)
#define TCR_TPSC_CLK256 (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK (7 << 0)
87 | ||
de2d12c7 | 88 | static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr) |
9570ef20 | 89 | { |
9570ef20 MD |
90 | unsigned long offs; |
91 | ||
8c7f21e6 LP |
92 | if (reg_nr == TSTR) { |
93 | switch (ch->tmu->model) { | |
94 | case SH_TMU_LEGACY: | |
95 | return ioread8(ch->tmu->mapbase); | |
96 | case SH_TMU_SH3: | |
97 | return ioread8(ch->tmu->mapbase + 2); | |
98 | case SH_TMU: | |
99 | return ioread8(ch->tmu->mapbase + 4); | |
100 | } | |
101 | } | |
9570ef20 MD |
102 | |
103 | offs = reg_nr << 2; | |
104 | ||
105 | if (reg_nr == TCR) | |
de693461 | 106 | return ioread16(ch->base + offs); |
9570ef20 | 107 | else |
de693461 | 108 | return ioread32(ch->base + offs); |
9570ef20 MD |
109 | } |
110 | ||
de2d12c7 | 111 | static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr, |
9570ef20 MD |
112 | unsigned long value) |
113 | { | |
9570ef20 MD |
114 | unsigned long offs; |
115 | ||
116 | if (reg_nr == TSTR) { | |
8c7f21e6 LP |
117 | switch (ch->tmu->model) { |
118 | case SH_TMU_LEGACY: | |
119 | return iowrite8(value, ch->tmu->mapbase); | |
120 | case SH_TMU_SH3: | |
121 | return iowrite8(value, ch->tmu->mapbase + 2); | |
122 | case SH_TMU: | |
123 | return iowrite8(value, ch->tmu->mapbase + 4); | |
124 | } | |
9570ef20 MD |
125 | } |
126 | ||
127 | offs = reg_nr << 2; | |
128 | ||
129 | if (reg_nr == TCR) | |
de693461 | 130 | iowrite16(value, ch->base + offs); |
9570ef20 | 131 | else |
de693461 | 132 | iowrite32(value, ch->base + offs); |
9570ef20 MD |
133 | } |
134 | ||
de2d12c7 | 135 | static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start) |
9570ef20 | 136 | { |
9570ef20 MD |
137 | unsigned long flags, value; |
138 | ||
139 | /* start stop register shared by multiple timer channels */ | |
c2225a57 | 140 | raw_spin_lock_irqsave(&sh_tmu_lock, flags); |
de2d12c7 | 141 | value = sh_tmu_read(ch, TSTR); |
9570ef20 MD |
142 | |
143 | if (start) | |
fe68eb80 | 144 | value |= 1 << ch->index; |
9570ef20 | 145 | else |
fe68eb80 | 146 | value &= ~(1 << ch->index); |
9570ef20 | 147 | |
de2d12c7 | 148 | sh_tmu_write(ch, TSTR, value); |
c2225a57 | 149 | raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); |
9570ef20 MD |
150 | } |
151 | ||
de2d12c7 | 152 | static int __sh_tmu_enable(struct sh_tmu_channel *ch) |
9570ef20 | 153 | { |
9570ef20 MD |
154 | int ret; |
155 | ||
d4905ce3 | 156 | /* enable clock */ |
de2d12c7 | 157 | ret = clk_enable(ch->tmu->clk); |
9570ef20 | 158 | if (ret) { |
fe68eb80 LP |
159 | dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n", |
160 | ch->index); | |
9570ef20 MD |
161 | return ret; |
162 | } | |
163 | ||
164 | /* make sure channel is disabled */ | |
de2d12c7 | 165 | sh_tmu_start_stop_ch(ch, 0); |
9570ef20 MD |
166 | |
167 | /* maximum timeout */ | |
de2d12c7 LP |
168 | sh_tmu_write(ch, TCOR, 0xffffffff); |
169 | sh_tmu_write(ch, TCNT, 0xffffffff); | |
9570ef20 MD |
170 | |
171 | /* configure channel to parent clock / 4, irq off */ | |
de2d12c7 | 172 | ch->rate = clk_get_rate(ch->tmu->clk) / 4; |
5cfe2d15 | 173 | sh_tmu_write(ch, TCR, TCR_TPSC_CLK4); |
9570ef20 MD |
174 | |
175 | /* enable channel */ | |
de2d12c7 | 176 | sh_tmu_start_stop_ch(ch, 1); |
9570ef20 MD |
177 | |
178 | return 0; | |
179 | } | |
180 | ||
/*
 * Reference-counted enable: the device is powered up and the channel
 * started only on the 0 -> 1 transition of the enable count.
 */
static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	/* Resume the device and flag it as managed by syscore suspend/resume. */
	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}
191 | ||
/* Stop one channel and release its clock (reverse of __sh_tmu_enable()). */
static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}
203 | ||
/*
 * Reference-counted disable: the channel is stopped and the device powered
 * down only when the last user drops its reference.
 */
static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	/* Undo the PM changes made by sh_tmu_enable(). */
	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}
217 | ||
/*
 * Program a new timeout of @delta counter ticks.  The channel is stopped,
 * a pending underflow is acknowledged, interrupts are enabled and TCOR/TCNT
 * are reloaded before the counter is restarted.
 */
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/*
	 * Reload delta value in case of periodic timer; a oneshot timer
	 * reloads the maximum value so it keeps counting after the event.
	 */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}
241 | ||
242 | static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id) | |
243 | { | |
de2d12c7 | 244 | struct sh_tmu_channel *ch = dev_id; |
9570ef20 MD |
245 | |
246 | /* disable or acknowledge interrupt */ | |
de2d12c7 | 247 | if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) |
5cfe2d15 | 248 | sh_tmu_write(ch, TCR, TCR_TPSC_CLK4); |
9570ef20 | 249 | else |
5cfe2d15 | 250 | sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4); |
9570ef20 MD |
251 | |
252 | /* notify clockevent layer */ | |
de2d12c7 | 253 | ch->ced.event_handler(&ch->ced); |
9570ef20 MD |
254 | return IRQ_HANDLED; |
255 | } | |
256 | ||
/* Map an embedded clocksource pointer back to its owning channel. */
static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}
261 | ||
262 | static cycle_t sh_tmu_clocksource_read(struct clocksource *cs) | |
263 | { | |
de2d12c7 | 264 | struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); |
9570ef20 | 265 | |
de2d12c7 | 266 | return sh_tmu_read(ch, TCNT) ^ 0xffffffff; |
9570ef20 MD |
267 | } |
268 | ||
269 | static int sh_tmu_clocksource_enable(struct clocksource *cs) | |
270 | { | |
de2d12c7 | 271 | struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); |
0aeac458 | 272 | int ret; |
9570ef20 | 273 | |
de2d12c7 | 274 | if (WARN_ON(ch->cs_enabled)) |
61a53bfa RW |
275 | return 0; |
276 | ||
de2d12c7 | 277 | ret = sh_tmu_enable(ch); |
eaa49a8c | 278 | if (!ret) { |
de2d12c7 LP |
279 | __clocksource_updatefreq_hz(cs, ch->rate); |
280 | ch->cs_enabled = true; | |
eaa49a8c | 281 | } |
61a53bfa | 282 | |
0aeac458 | 283 | return ret; |
9570ef20 MD |
284 | } |
285 | ||
/* Clocksource ->disable() callback: drop the enable reference. */
static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}
296 | ||
/*
 * Clocksource ->suspend() callback.  Drops this user's reference directly
 * (bypassing sh_tmu_disable() and runtime PM, which must not run here) and
 * powers the PM domain off when the channel has no remaining users.
 */
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}
309 | ||
/*
 * Clocksource ->resume() callback: mirror of sh_tmu_clocksource_suspend().
 * Re-takes the reference dropped at suspend and powers the domain back on
 * before restarting the channel.
 */
static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}
322 | ||
/*
 * Set up and register one channel as a 32-bit continuous clocksource.
 * Always returns 0.
 */
static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}
345 | ||
/* Map an embedded clock_event_device pointer back to its owning channel. */
static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}
350 | ||
/*
 * Enable the channel for clock events and, in periodic mode, program the
 * per-tick reload value derived from the (now known) counter rate.
 */
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);

	/* ch->rate became valid in sh_tmu_enable(); propagate it. */
	clockevents_config(ced, ch->rate);

	if (periodic) {
		/* Ticks per jiffy, rounded to nearest. */
		ch->periodic = (ch->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}
364 | ||
/*
 * Clockevent ->set_mode() callback: tear down the previous mode if it had
 * the channel running, then start the requested one.
 */
static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		/* Avoid a double disable if the old mode already stopped it. */
		if (!disabled)
			sh_tmu_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}
402 | ||
/*
 * Clockevent ->set_next_event() callback: program a oneshot expiry of
 * @delta counter ticks.  Always returns 0.
 */
static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}
414 | ||
eaa49a8c RW |
415 | static void sh_tmu_clock_event_suspend(struct clock_event_device *ced) |
416 | { | |
de2d12c7 | 417 | pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev); |
eaa49a8c RW |
418 | } |
419 | ||
420 | static void sh_tmu_clock_event_resume(struct clock_event_device *ced) | |
421 | { | |
de2d12c7 | 422 | pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev); |
eaa49a8c RW |
423 | } |
424 | ||
/*
 * Set up and register one channel as a periodic/oneshot clock event device,
 * then request its interrupt.
 */
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	/*
	 * Registered with a dummy 1 Hz frequency; the real rate is set via
	 * clockevents_config() in sh_tmu_clock_event_start().
	 */
	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}
455 | ||
84876d05 | 456 | static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name, |
f1010ed1 | 457 | bool clockevent, bool clocksource) |
9570ef20 | 458 | { |
8c7f21e6 LP |
459 | if (clockevent) { |
460 | ch->tmu->has_clockevent = true; | |
f1010ed1 | 461 | sh_tmu_register_clockevent(ch, name); |
8c7f21e6 LP |
462 | } else if (clocksource) { |
463 | ch->tmu->has_clocksource = true; | |
f1010ed1 | 464 | sh_tmu_register_clocksource(ch, name); |
8c7f21e6 | 465 | } |
9570ef20 MD |
466 | |
467 | return 0; | |
468 | } | |
469 | ||
/*
 * Initialize one channel: compute its register base and index, fetch its
 * interrupt and register it in the requested role.  Returns 0 on success
 * (including for unused channels) or a negative error code.
 */
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;

	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;

		/*
		 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps
		 * channel registers blocks at base + 2 + 12 * index, while all
		 * other variants map them at base + 4 + 12 * index. We can
		 * compute the index by just dividing by 12, the 2 bytes or 4
		 * bytes offset being hidden by the integer division.
		 */
		ch->index = cfg->channel_offset / 12;
		ch->base = tmu->mapbase + cfg->channel_offset;
	} else {
		ch->index = index;

		/* Channel blocks are 12 bytes apart, after the TSTR area. */
		if (tmu->model == SH_TMU_SH3)
			ch->base = tmu->mapbase + 4 + ch->index * 12;
		else
			ch->base = tmu->mapbase + 8 + ch->index * 12;
	}

	ch->irq = platform_get_irq(tmu->pdev, ch->index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}
514 | ||
/*
 * Map the device's memory resource.  Returns 0 on success, -ENXIO when the
 * resource is missing or cannot be remapped.
 */
static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	/*
	 * In legacy platform device configuration (with one device per channel)
	 * the resource points to the channel base address.  Bias mapbase back
	 * to the device base so the shared-register math works; undone in
	 * sh_tmu_unmap_memory().
	 */
	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
		tmu->mapbase -= cfg->channel_offset;
	}

	return 0;
}
de693461 | 540 | |
8c7f21e6 LP |
/* Unmap the register range, undoing the legacy bias from sh_tmu_map_memory(). */
static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu)
{
	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
		tmu->mapbase += cfg->channel_offset;
	}

	iounmap(tmu->mapbase);
}
550 | ||
551 | static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) | |
552 | { | |
553 | struct sh_timer_config *cfg = pdev->dev.platform_data; | |
554 | const struct platform_device_id *id = pdev->id_entry; | |
555 | unsigned int i; | |
556 | int ret; | |
557 | ||
558 | if (!cfg) { | |
559 | dev_err(&tmu->pdev->dev, "missing platform data\n"); | |
560 | return -ENXIO; | |
561 | } | |
562 | ||
563 | tmu->pdev = pdev; | |
564 | tmu->model = id->driver_data; | |
565 | ||
566 | /* Get hold of clock. */ | |
a27d9227 LP |
567 | tmu->clk = clk_get(&tmu->pdev->dev, |
568 | tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck"); | |
0a72aa39 LP |
569 | if (IS_ERR(tmu->clk)) { |
570 | dev_err(&tmu->pdev->dev, "cannot get clock\n"); | |
8c7f21e6 | 571 | return PTR_ERR(tmu->clk); |
9570ef20 | 572 | } |
1c09eb3e | 573 | |
0a72aa39 | 574 | ret = clk_prepare(tmu->clk); |
1c09eb3e | 575 | if (ret < 0) |
8c7f21e6 LP |
576 | goto err_clk_put; |
577 | ||
578 | /* Map the memory resource. */ | |
579 | ret = sh_tmu_map_memory(tmu); | |
580 | if (ret < 0) { | |
581 | dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n"); | |
582 | goto err_clk_unprepare; | |
583 | } | |
1c09eb3e | 584 | |
8c7f21e6 LP |
585 | /* Allocate and setup the channels. */ |
586 | if (tmu->model == SH_TMU_LEGACY) | |
587 | tmu->num_channels = 1; | |
588 | else | |
589 | tmu->num_channels = hweight8(cfg->channels_mask); | |
590 | ||
591 | tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels, | |
592 | GFP_KERNEL); | |
a5de49f4 LP |
593 | if (tmu->channels == NULL) { |
594 | ret = -ENOMEM; | |
8c7f21e6 | 595 | goto err_unmap; |
a5de49f4 LP |
596 | } |
597 | ||
8c7f21e6 LP |
598 | if (tmu->model == SH_TMU_LEGACY) { |
599 | ret = sh_tmu_channel_setup(&tmu->channels[0], 0, | |
600 | cfg->clockevent_rating != 0, | |
601 | cfg->clocksource_rating != 0, tmu); | |
602 | if (ret < 0) | |
603 | goto err_unmap; | |
604 | } else { | |
605 | /* | |
606 | * Use the first channel as a clock event device and the second | |
607 | * channel as a clock source. | |
608 | */ | |
609 | for (i = 0; i < tmu->num_channels; ++i) { | |
610 | ret = sh_tmu_channel_setup(&tmu->channels[i], i, | |
611 | i == 0, i == 1, tmu); | |
612 | if (ret < 0) | |
613 | goto err_unmap; | |
614 | } | |
615 | } | |
a5de49f4 | 616 | |
8c7f21e6 | 617 | platform_set_drvdata(pdev, tmu); |
394a4486 LP |
618 | |
619 | return 0; | |
620 | ||
8c7f21e6 | 621 | err_unmap: |
a5de49f4 | 622 | kfree(tmu->channels); |
8c7f21e6 LP |
623 | sh_tmu_unmap_memory(tmu); |
624 | err_clk_unprepare: | |
0a72aa39 | 625 | clk_unprepare(tmu->clk); |
8c7f21e6 | 626 | err_clk_put: |
0a72aa39 | 627 | clk_put(tmu->clk); |
9570ef20 MD |
628 | return ret; |
629 | } | |
630 | ||
/*
 * Probe entry point, reached both as an early platform device (before the
 * driver model is fully up) and as a regular platform device later.  The
 * early pass performs the setup; the regular pass then finds the device
 * via drvdata and only finishes the runtime PM configuration.
 */
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	/* Runtime PM is only usable on the regular (non-early) pass. */
	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	/* Non-NULL drvdata means the early pass already did the setup. */
	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	/*
	 * Timers in use must stay accessible from interrupt context; an
	 * entirely unused device may idle.
	 */
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
669 | ||
/* Removal is refused: registered clockevents/clocksources cannot be torn down. */
static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
674 | ||
8c7f21e6 LP |
/* Device-name to model mapping; driver_data is read back in sh_tmu_setup(). */
static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh_tmu", SH_TMU_LEGACY },
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
682 | ||
9570ef20 MD |
/* Platform driver glue; also registered as an "earlytimer" driver below. */
static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
	},
	.id_table	= sh_tmu_id_table,
};
691 | ||
/* Module init: register the platform driver for non-early devices. */
static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}
696 | ||
/* Module exit: unregister the platform driver. */
static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}
701 | ||
/* Allow probing in the "earlytimer" early platform device pass. */
early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");