drivers/soc/dove/pmu.c
/*
 * Marvell Dove PMU support
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/soc/dove/pmu.h>
#include <linux/spinlock.h>

#define NR_PMU_IRQS		7

#define PMC_SW_RST		0x30
#define PMC_IRQ_CAUSE		0x50
#define PMC_IRQ_MASK		0x54

#define PMU_PWR			0x10
#define PMU_ISO			0x58

struct pmu_data {
	spinlock_t lock;
	struct device_node *of_node;
	void __iomem *pmc_base;
	void __iomem *pmu_base;
	struct irq_chip_generic *irq_gc;
	struct irq_domain *irq_domain;
#ifdef CONFIG_RESET_CONTROLLER
	struct reset_controller_dev reset;
#endif
};

/*
 * The PMU contains a register to reset various subsystems within the
 * SoC. Export this as a reset controller.
 */
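
/*
 * Consumers never call the ops below directly; they go through the
 * generic reset API.  A minimal sketch of a hypothetical consumer driver
 * (the device and its "resets" property are assumptions, not part of
 * this file):
 *
 *	struct reset_control *rst;
 *
 *	rst = devm_reset_control_get(&pdev->dev, NULL);
 *	if (!IS_ERR(rst))
 *		reset_control_reset(rst);	(ends up in pmu_reset_reset())
 */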
#ifdef CONFIG_RESET_CONTROLLER
#define rcdev_to_pmu(rcdev) container_of(rcdev, struct pmu_data, reset)

static int pmu_reset_reset(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&pmu->lock, flags);
	val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static int pmu_reset_assert(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val = ~BIT(id);

	spin_lock_irqsave(&pmu->lock, flags);
	val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static int pmu_reset_deassert(struct reset_controller_dev *rc, unsigned long id)
{
	struct pmu_data *pmu = rcdev_to_pmu(rc);
	unsigned long flags;
	u32 val = BIT(id);

	spin_lock_irqsave(&pmu->lock, flags);
	val |= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
	writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static struct reset_control_ops pmu_reset_ops = {
	.reset = pmu_reset_reset,
	.assert = pmu_reset_assert,
	.deassert = pmu_reset_deassert,
};

static struct reset_controller_dev pmu_reset __initdata = {
	.ops = &pmu_reset_ops,
	.owner = THIS_MODULE,
	.nr_resets = 32,
};

static void __init pmu_reset_init(struct pmu_data *pmu)
{
	int ret;

	pmu->reset = pmu_reset;
	pmu->reset.of_node = pmu->of_node;

	ret = reset_controller_register(&pmu->reset);
	if (ret)
		pr_err("pmu: %s failed: %d\n", "reset_controller_register", ret);
}
#else
static void __init pmu_reset_init(struct pmu_data *pmu)
{
}
#endif

struct pmu_domain {
	struct pmu_data *pmu;
	u32 pwr_mask;
	u32 rst_mask;
	u32 iso_mask;
	struct generic_pm_domain base;
};

#define to_pmu_domain(dom) container_of(dom, struct pmu_domain, base)

/*
 * This deals with the "old" Marvell sequence of bringing a power domain
 * down/up, which is: apply power, release reset, disable isolators.
 *
 * Later devices apparently use a different sequence: power up, disable
 * isolators, assert repair signal, enable SRMA clock, enable AXI clock,
 * enable module clock, deassert reset.
 *
 * Note: reading the assembly, it seems that the IO accessors have an
 * unfortunate side-effect - they cause memory already read into registers
 * for the if () to be re-read for the bit-set or bit-clear operation.
 * The code is written to avoid this.
 */
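
/*
 * Concretely, the read-modify-write sequences below are therefore
 * written as, e.g.:
 *
 *	val = ~pmu_dom->iso_mask;
 *	val &= readl_relaxed(pmu_base + PMU_ISO);
 *
 * rather than as the more obvious single expression, so that the mask
 * is read from memory only once, before the IO access.
 */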
static int pmu_domain_power_off(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Enable isolators */
	if (pmu_dom->iso_mask) {
		val = ~pmu_dom->iso_mask;
		val &= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	/* Reset unit */
	if (pmu_dom->rst_mask) {
		val = ~pmu_dom->rst_mask;
		val &= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Power down */
	val = readl_relaxed(pmu_base + PMU_PWR) | pmu_dom->pwr_mask;
	writel_relaxed(val, pmu_base + PMU_PWR);

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static int pmu_domain_power_on(struct generic_pm_domain *domain)
{
	struct pmu_domain *pmu_dom = to_pmu_domain(domain);
	struct pmu_data *pmu = pmu_dom->pmu;
	unsigned long flags;
	unsigned int val;
	void __iomem *pmu_base = pmu->pmu_base;
	void __iomem *pmc_base = pmu->pmc_base;

	spin_lock_irqsave(&pmu->lock, flags);

	/* Power on */
	val = ~pmu_dom->pwr_mask & readl_relaxed(pmu_base + PMU_PWR);
	writel_relaxed(val, pmu_base + PMU_PWR);

	/* Release reset */
	if (pmu_dom->rst_mask) {
		val = pmu_dom->rst_mask;
		val |= readl_relaxed(pmc_base + PMC_SW_RST);
		writel_relaxed(val, pmc_base + PMC_SW_RST);
	}

	/* Disable isolators */
	if (pmu_dom->iso_mask) {
		val = pmu_dom->iso_mask;
		val |= readl_relaxed(pmu_base + PMU_ISO);
		writel_relaxed(val, pmu_base + PMU_ISO);
	}

	spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}

static void __pmu_domain_register(struct pmu_domain *domain,
				  struct device_node *np)
{
	unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR);

	domain->base.power_off = pmu_domain_power_off;
	domain->base.power_on = pmu_domain_power_on;

	pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask));

	if (np)
		of_genpd_add_provider_simple(np, &domain->base);
}
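
/*
 * Once a domain is registered through of_genpd_add_provider_simple(),
 * devices attach to it purely via devicetree; a hypothetical consumer
 * node (names are illustrative only) would contain:
 *
 *	power-domains = <&vpu_domain>;
 *
 * and the genpd core then calls pmu_domain_power_on/off() around that
 * device's runtime PM transitions.
 */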

/* PMU IRQ controller */
static void pmu_irq_handler(struct irq_desc *desc)
{
	struct pmu_data *pmu = irq_desc_get_handler_data(desc);
	struct irq_chip_generic *gc = pmu->irq_gc;
	struct irq_domain *domain = pmu->irq_domain;
	void __iomem *base = gc->reg_base;
	u32 stat = readl_relaxed(base + PMC_IRQ_CAUSE) & gc->mask_cache;
	u32 done = ~0;

	if (stat == 0) {
		handle_bad_irq(desc);
		return;
	}

	while (stat) {
		u32 hwirq = fls(stat) - 1;

		stat &= ~(1 << hwirq);
		done &= ~(1 << hwirq);

		generic_handle_irq(irq_find_mapping(domain, hwirq));
	}

	/*
	 * The PMU interrupt cause register is not RW0C: it is RW. This
	 * means that the bits take whatever value is written to them;
	 * if you write a '1', you will set the interrupt.
	 *
	 * Unfortunately this means there is NO race-free way to clear
	 * these interrupts.
	 *
	 * So, let's structure the code so that the window is as small as
	 * possible.
	 */
	irq_gc_lock(gc);
	done &= readl_relaxed(base + PMC_IRQ_CAUSE);
	writel_relaxed(done, base + PMC_IRQ_CAUSE);
	irq_gc_unlock(gc);
}

static int __init dove_init_pmu_irq(struct pmu_data *pmu, int irq)
{
	const char *name = "pmu_irq";
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int ret;

	/* mask and clear all interrupts */
	writel(0, pmu->pmc_base + PMC_IRQ_MASK);
	writel(0, pmu->pmc_base + PMC_IRQ_CAUSE);

	domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", name);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name,
					     handle_level_irq,
					     IRQ_NOREQUEST | IRQ_NOPROBE, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret);
		irq_domain_remove(domain);
		return ret;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = pmu->pmc_base;
	gc->chip_types[0].regs.mask = PMC_IRQ_MASK;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	pmu->irq_domain = domain;
	pmu->irq_gc = gc;

	irq_set_handler_data(irq, pmu);
	irq_set_chained_handler(irq, pmu_irq_handler);

	return 0;
}
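
/*
 * With the chained handler installed, the NR_PMU_IRQS demultiplexed PMU
 * interrupts can be consumed like any other devicetree interrupt.  A
 * hypothetical consumer node (illustrative only, and assuming the PMU
 * node also carries "#interrupt-cells = <1>") would use:
 *
 *	interrupt-parent = <&pmu>;
 *	interrupts = <3>;
 */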

/*
 * pmu: power-manager@d0000 {
 *	compatible = "marvell,dove-pmu";
 *	reg = <0xd0000 0x8000> <0xd8000 0x8000>;
 *	interrupts = <33>;
 *	interrupt-controller;
 *	#reset-cells = <1>;
 *	domains {
 *		vpu_domain: vpu-domain {
 *			#power-domain-cells = <0>;
 *			marvell,pmu_pwr_mask = <0x00000008>;
 *			marvell,pmu_iso_mask = <0x00000001>;
 *			resets = <&pmu 16>;
 *		};
 *		gpu_domain: gpu-domain {
 *			#power-domain-cells = <0>;
 *			marvell,pmu_pwr_mask = <0x00000004>;
 *			marvell,pmu_iso_mask = <0x00000002>;
 *			resets = <&pmu 18>;
 *		};
 *	};
 * };
 */
int __init dove_init_pmu(void)
{
	struct device_node *np_pmu, *domains_node, *np;
	struct pmu_data *pmu;
	int ret, parent_irq;

	/* Lookup the PMU node */
	np_pmu = of_find_compatible_node(NULL, NULL, "marvell,dove-pmu");
	if (!np_pmu)
		return 0;

	domains_node = of_get_child_by_name(np_pmu, "domains");
	if (!domains_node) {
		pr_err("%s: failed to find domains sub-node\n", np_pmu->name);
		return 0;
	}

	pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	spin_lock_init(&pmu->lock);
	pmu->of_node = np_pmu;
	pmu->pmc_base = of_iomap(pmu->of_node, 0);
	pmu->pmu_base = of_iomap(pmu->of_node, 1);
	if (!pmu->pmc_base || !pmu->pmu_base) {
		pr_err("%s: failed to map PMU\n", np_pmu->name);
		iounmap(pmu->pmu_base);
		iounmap(pmu->pmc_base);
		kfree(pmu);
		return -ENOMEM;
	}

	pmu_reset_init(pmu);

	for_each_available_child_of_node(domains_node, np) {
		struct of_phandle_args args;
		struct pmu_domain *domain;

		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
		if (!domain)
			break;

		domain->pmu = pmu;
		domain->base.name = kstrdup(np->name, GFP_KERNEL);
		if (!domain->base.name) {
			kfree(domain);
			break;
		}

		of_property_read_u32(np, "marvell,pmu_pwr_mask",
				     &domain->pwr_mask);
		of_property_read_u32(np, "marvell,pmu_iso_mask",
				     &domain->iso_mask);

		/*
		 * We parse the reset controller property directly here
		 * to ensure that we can operate when the reset controller
		 * support is not configured into the kernel.
		 */
		ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
						 0, &args);
		if (ret == 0) {
			if (args.np == pmu->of_node)
				domain->rst_mask = BIT(args.args[0]);
			of_node_put(args.np);
		}

		__pmu_domain_register(domain, np);
	}

	/* Loss of the interrupt controller is not a fatal error. */
	parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
	if (!parent_irq) {
		pr_err("%s: no interrupt specified\n", np_pmu->name);
	} else {
		ret = dove_init_pmu_irq(pmu, parent_irq);
		if (ret)
			pr_err("dove_init_pmu_irq() failed: %d\n", ret);
	}

	return 0;
}
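
/*
 * dove_init_pmu() takes no device and does its own DT lookup, so it is
 * intended to be called once from the platform's machine init code.  A
 * minimal sketch (the machine hook name is an assumption, not defined
 * in this file):
 *
 *	static void __init dove_init(void)
 *	{
 *		...
 *		dove_init_pmu();
 *	}
 */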