arch/arm/mach-exynos/common.c
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Common Codes for EXYNOS
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/sched.h>
#include <linux/serial_core.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>

#include <asm/proc-fns.h>
#include <asm/exception.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <asm/cacheflush.h>

#include <mach/regs-irq.h>
#include <mach/regs-pmu.h>
#include <mach/regs-gpio.h>
#include <mach/pmu.h>

#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/devs.h>
#include <plat/pm.h>
#include <plat/sdhci.h>
#include <plat/gpio-cfg.h>
#include <plat/adc-core.h>
#include <plat/fb-core.h>
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
#include <plat/tv-core.h>
#include <plat/regs-serial.h>

#include "common.h"

#define L2_AUX_VAL	0x7C470001
#define L2_AUX_MASK	0xC200ffff

static const char name_exynos4210[] = "EXYNOS4210";
static const char name_exynos4212[] = "EXYNOS4212";
static const char name_exynos4412[] = "EXYNOS4412";
static const char name_exynos5250[] = "EXYNOS5250";

static void exynos4_map_io(void);
static void exynos5_map_io(void);
static void exynos4_init_clocks(int xtal);
static void exynos5_init_clocks(int xtal);
static void exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no);
static int exynos_init(void);

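/*
 * Table of supported SoCs: s3c_init_cpu() matches the chip ID read from
 * the CHIPID block against these entries and installs the per-SoC
 * map_io/clock/uart/init callbacks.
 */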
static struct cpu_table cpu_ids[] __initdata = {
	{
		.idcode		= EXYNOS4210_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4210,
	}, {
		.idcode		= EXYNOS4212_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4212,
	}, {
		.idcode		= EXYNOS4412_CPU_ID,
		.idmask		= EXYNOS4_CPU_MASK,
		.map_io		= exynos4_map_io,
		.init_clocks	= exynos4_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos4412,
	}, {
		.idcode		= EXYNOS5250_SOC_ID,
		.idmask		= EXYNOS5_SOC_MASK,
		.map_io		= exynos5_map_io,
		.init_clocks	= exynos5_init_clocks,
		.init_uarts	= exynos_init_uarts,
		.init		= exynos_init,
		.name		= name_exynos5250,
	},
};

/* Initial IO mappings */

static struct map_desc exynos_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_CHIPID,
		.pfn		= __phys_to_pfn(EXYNOS_PA_CHIPID),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_CPU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_GIC_DIST),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_CMU),
		.length		= SZ_128K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COREPERI_BASE,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_COREPERI),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_L2CC,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_L2CC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC0,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC0),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_DMC1,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_DMC1),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_USB_HSPHY,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_HSPHY),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc0[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM0),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

static struct map_desc exynos4_iodesc1[] __initdata = {
	{
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS4_PA_SYSRAM1),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

static struct map_desc exynos5_iodesc[] __initdata = {
	{
		.virtual	= (unsigned long)S3C_VA_SYS,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSCON),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_TIMER,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_TIMER),
		.length		= SZ_16K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_WATCHDOG,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_WATCHDOG),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SROMC,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SROMC),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSTIMER,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSTIMER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_SYSRAM,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_SYSRAM),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_CMU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_CMU),
		.length		= 144 * SZ_1K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_PMU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_PMU),
		.length		= SZ_64K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_COMBINER_BASE,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_COMBINER),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S3C_VA_UART,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_UART),
		.length		= SZ_512K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_CPU,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
		.length		= SZ_8K,
		.type		= MT_DEVICE,
	}, {
		.virtual	= (unsigned long)S5P_VA_GIC_DIST,
		.pfn		= __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

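/*
 * Machine restart hooks: both trigger a software reset by writing to the
 * SWRESET register.
 */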
void exynos4_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, S5P_SWRESET);
}

void exynos5_restart(char mode, const char *cmd)
{
	__raw_writel(0x1, EXYNOS_SWRESET);
}

/*
 * exynos_init_io
 *
 * Register the standard CPU I/O areas.
 */

void __init exynos_init_io(struct map_desc *mach_desc, int size)
{
	/* initialize the io descriptors we need for initialization */
	iotable_init(exynos_iodesc, ARRAY_SIZE(exynos_iodesc));
	if (mach_desc)
		iotable_init(mach_desc, size);

	/* detect cpu id and rev. */
	s5p_init_cpu(S5P_VA_CHIPID);

	s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
}

static void __init exynos4_map_io(void)
{
	iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));

	if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_0)
		iotable_init(exynos4_iodesc0, ARRAY_SIZE(exynos4_iodesc0));
	else
		iotable_init(exynos4_iodesc1, ARRAY_SIZE(exynos4_iodesc1));

	/* initialize device information early */
	exynos4_default_sdhci0();
	exynos4_default_sdhci1();
	exynos4_default_sdhci2();
	exynos4_default_sdhci3();

	s3c_adc_setname("samsung-adc-v3");

	s3c_fimc_setname(0, "exynos4-fimc");
	s3c_fimc_setname(1, "exynos4-fimc");
	s3c_fimc_setname(2, "exynos4-fimc");
	s3c_fimc_setname(3, "exynos4-fimc");

	s3c_sdhci_setname(0, "exynos4-sdhci");
	s3c_sdhci_setname(1, "exynos4-sdhci");
	s3c_sdhci_setname(2, "exynos4-sdhci");
	s3c_sdhci_setname(3, "exynos4-sdhci");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");

	s5p_fb_setname(0, "exynos4-fb");
	s5p_hdmi_setname("exynos4-hdmi");
}

static void __init exynos5_map_io(void)
{
	iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));

	s3c_device_i2c0.resource[0].start = EXYNOS5_PA_IIC(0);
	s3c_device_i2c0.resource[0].end = EXYNOS5_PA_IIC(0) + SZ_4K - 1;
	s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC;
	s3c_device_i2c0.resource[1].end = EXYNOS5_IRQ_IIC;

	s3c_sdhci_setname(0, "exynos4-sdhci");
	s3c_sdhci_setname(1, "exynos4-sdhci");
	s3c_sdhci_setname(2, "exynos4-sdhci");
	s3c_sdhci_setname(3, "exynos4-sdhci");

	/* The I2C bus controllers are directly compatible with s3c2440 */
	s3c_i2c0_setname("s3c2440-i2c");
	s3c_i2c1_setname("s3c2440-i2c");
	s3c_i2c2_setname("s3c2440-i2c");
}

static void __init exynos4_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	if (soc_is_exynos4210())
		exynos4210_register_clocks();
	else if (soc_is_exynos4212() || soc_is_exynos4412())
		exynos4212_register_clocks();

	exynos4_register_clocks();
	exynos4_setup_clocks();
}

static void __init exynos5_init_clocks(int xtal)
{
	printk(KERN_DEBUG "%s: initializing clocks\n", __func__);

	s3c24xx_register_baseclocks(xtal);
	s5p_register_clocks(xtal);

	exynos5_register_clocks();
	exynos5_setup_clocks();
}

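/*
 * Interrupt combiner support. Each combiner channel groups up to eight
 * interrupt sources behind a single parent (GIC SPI) line; the code below
 * masks/unmasks individual sources and demultiplexes the combined status
 * onto per-source Linux IRQs.
 */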
#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
	unsigned int irq_offset;
	unsigned int irq_mask;
	void __iomem *base;
};

static struct irq_domain *combiner_irq_domain;
static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

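/*
 * Chained handler for a combiner group: read the combined status, pick the
 * lowest pending source and forward it as its own Linux IRQ.
 */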
static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, combiner_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = __ffs(status);

	cascade_irq = combiner_irq + (chip_data->irq_offset & ~31);
	if (unlikely(cascade_irq >= NR_IRQS))
		do_bad_IRQ(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip combiner_chip = {
	.name		= "COMBINER",
	.irq_mask	= combiner_mask_irq,
	.irq_unmask	= combiner_unmask_irq,
};

static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq)
{
	unsigned int max_nr;

	if (soc_is_exynos5250())
		max_nr = EXYNOS5_MAX_COMBINER_NR;
	else
		max_nr = EXYNOS4_MAX_COMBINER_NR;

	if (combiner_nr >= max_nr)
		BUG();
	if (irq_set_handler_data(irq, &combiner_data[combiner_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, combiner_handle_cascade_irq);
}

static void __init combiner_init_one(unsigned int combiner_nr,
				     void __iomem *base)
{
	combiner_data[combiner_nr].base = base;
	combiner_data[combiner_nr].irq_offset = irq_find_mapping(
		combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
	combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);

	/* Disable all interrupts */
	__raw_writel(combiner_data[combiner_nr].irq_mask,
		     base + COMBINER_ENABLE_CLEAR);
}

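/*
 * Translate a two-cell devicetree interrupt specifier
 * (combiner number, interrupt within the combiner) into a linear hwirq.
 */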
#ifdef CONFIG_OF
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}
#else
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	return -EINVAL;
}
#endif

static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

	return 0;
}

static struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};

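/*
 * Set up the interrupt combiners: allocate the Linux IRQ range, register a
 * legacy IRQ domain, initialize each combiner's registers and chain it to
 * its parent interrupt (taken from the devicetree when available).
 */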
void __init combiner_init(void __iomem *combiner_base, struct device_node *np)
{
	int i, irq, irq_base;
	unsigned int max_nr, nr_irq;

	if (np) {
		if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
			pr_warning("%s: number of combiners not specified, "
				   "setting default as %d.\n",
				   __func__, EXYNOS4_MAX_COMBINER_NR);
			max_nr = EXYNOS4_MAX_COMBINER_NR;
		}
	} else {
		max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
						EXYNOS4_MAX_COMBINER_NR;
	}
	nr_irq = max_nr * MAX_IRQ_IN_COMBINER;

	irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
	if (IS_ERR_VALUE(irq_base)) {
		irq_base = COMBINER_IRQ(0, 0);
		pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
	}

	combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
				&combiner_irq_domain_ops, &combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warning("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
		irq = np ? irq_of_parse_and_map(np, i) : IRQ_SPI(i);
		combiner_cascade_irq(i, irq);
	}
}

#ifdef CONFIG_OF
int __init combiner_of_init(struct device_node *np, struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	combiner_init(combiner_base, np);

	return 0;
}

static const struct of_device_id exynos4_dt_irq_match[] = {
	{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
	{ .compatible = "samsung,exynos4210-combiner",
			.data = combiner_of_init, },
	{},
};
#endif

void __init exynos4_init_irq(void)
{
	unsigned int gic_bank_offset;

	gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;

	if (!of_have_populated_dt())
		gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL);
#ifdef CONFIG_OF
	else
		of_irq_init(exynos4_dt_irq_match);
#endif

	if (!of_have_populated_dt())
		combiner_init(S5P_VA_COMBINER_BASE, NULL);

	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS4
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}

void __init exynos5_init_irq(void)
{
#ifdef CONFIG_OF
	of_irq_init(exynos4_dt_irq_match);
#endif
	/*
	 * The parameters of s5p_init_irq() are for VIC init.
	 * These parameters should be NULL and 0 because EXYNOS5
	 * uses GIC instead of VIC.
	 */
	s5p_init_irq(NULL, 0);
}

struct bus_type exynos_subsys = {
	.name		= "exynos-core",
	.dev_name	= "exynos-core",
};

static struct device exynos4_dev = {
	.bus	= &exynos_subsys,
};

static int __init exynos_core_init(void)
{
	return subsys_system_register(&exynos_subsys, NULL);
}
core_initcall(exynos_core_init);

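/*
 * Outer (L2X0) cache setup for EXYNOS4: prefer the devicetree-based
 * l2x0_of_init() path and fall back to static latency/prefetch settings.
 * The register values are also saved so they can be restored after a
 * suspend/resume cycle.
 */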
#ifdef CONFIG_CACHE_L2X0
static int __init exynos4_l2x0_cache_init(void)
{
	int ret;

	if (soc_is_exynos5250())
		return 0;

	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
	if (!ret) {
		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		return 0;
	}

	if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
		l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
		/* TAG, Data Latency Control: 2 cycles */
		l2x0_saved_regs.tag_latency = 0x110;

		if (soc_is_exynos4212() || soc_is_exynos4412())
			l2x0_saved_regs.data_latency = 0x120;
		else
			l2x0_saved_regs.data_latency = 0x110;

		l2x0_saved_regs.prefetch_ctrl = 0x30000007;
		l2x0_saved_regs.pwr_ctrl =
			(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

		__raw_writel(l2x0_saved_regs.tag_latency,
			     S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
		__raw_writel(l2x0_saved_regs.data_latency,
			     S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

		/* L2X0 Prefetch Control */
		__raw_writel(l2x0_saved_regs.prefetch_ctrl,
			     S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

		/* L2X0 Power Control */
		__raw_writel(l2x0_saved_regs.pwr_ctrl,
			     S5P_VA_L2CC + L2X0_POWER_CTRL);

		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
	}

	l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
	return 0;
}
early_initcall(exynos4_l2x0_cache_init);
#endif

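/*
 * EXYNOS5250 L2 setup: program the CP15 L2 control register (tag/data RAM
 * latency bits) while the cache enable bit in SCTLR is temporarily cleared.
 */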
static int __init exynos5_l2_cache_init(void)
{
	unsigned int val;

	if (!soc_is_exynos5250())
		return 0;

	asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
		     "bic %0, %0, #(1 << 2)\n"	/* cache disable */
		     "mcr p15, 0, %0, c1, c0, 0\n"
		     "mrc p15, 1, %0, c9, c0, 2\n"
		     : "=r"(val));

	val |= (1 << 9) | (1 << 5) | (2 << 6) | (2 << 0);

	asm volatile("mcr p15, 1, %0, c9, c0, 2\n" : : "r"(val));
	asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
		     "orr %0, %0, #(1 << 2)\n"	/* cache enable */
		     "mcr p15, 0, %0, c1, c0, 0\n"
		     : : "r"(val));

	return 0;
}
early_initcall(exynos5_l2_cache_init);

static int __init exynos_init(void)
{
	printk(KERN_INFO "EXYNOS: Initializing architecture\n");

	return device_register(&exynos4_dev);
}

/* uart registration process */

static void __init exynos_init_uarts(struct s3c2410_uartcfg *cfg, int no)
{
	struct s3c2410_uartcfg *tcfg = cfg;
	u32 ucnt;

	for (ucnt = 0; ucnt < no; ucnt++, tcfg++)
		tcfg->has_fracval = 1;

	if (soc_is_exynos5250())
		s3c24xx_init_uartdevs("exynos4210-uart", exynos5_uart_resources, cfg, no);
	else
		s3c24xx_init_uartdevs("exynos4210-uart", exynos4_uart_resources, cfg, no);
}

static void __iomem *exynos_eint_base;

static DEFINE_SPINLOCK(eint_lock);

static unsigned int eint0_15_data[16];

static inline int exynos4_irq_to_gpio(unsigned int irq)
{
	if (irq < IRQ_EINT(0))
		return -EINVAL;

	irq -= IRQ_EINT(0);
	if (irq < 8)
		return EXYNOS4_GPX0(irq);

	irq -= 8;
	if (irq < 8)
		return EXYNOS4_GPX1(irq);

	irq -= 8;
	if (irq < 8)
		return EXYNOS4_GPX2(irq);

	irq -= 8;
	if (irq < 8)
		return EXYNOS4_GPX3(irq);

	return -EINVAL;
}

static inline int exynos5_irq_to_gpio(unsigned int irq)
{
	if (irq < IRQ_EINT(0))
		return -EINVAL;

	irq -= IRQ_EINT(0);
	if (irq < 8)
		return EXYNOS5_GPX0(irq);

	irq -= 8;
	if (irq < 8)
		return EXYNOS5_GPX1(irq);

	irq -= 8;
	if (irq < 8)
		return EXYNOS5_GPX2(irq);

	irq -= 8;
	if (irq < 8)
		return EXYNOS5_GPX3(irq);

	return -EINVAL;
}

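/*
 * EINT0..EINT15 each have a dedicated SoC interrupt line; these tables give
 * the per-SoC parent IRQ used when chaining exynos_irq_eint0_15().
 */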
static unsigned int exynos4_eint0_15_src_int[16] = {
	EXYNOS4_IRQ_EINT0,
	EXYNOS4_IRQ_EINT1,
	EXYNOS4_IRQ_EINT2,
	EXYNOS4_IRQ_EINT3,
	EXYNOS4_IRQ_EINT4,
	EXYNOS4_IRQ_EINT5,
	EXYNOS4_IRQ_EINT6,
	EXYNOS4_IRQ_EINT7,
	EXYNOS4_IRQ_EINT8,
	EXYNOS4_IRQ_EINT9,
	EXYNOS4_IRQ_EINT10,
	EXYNOS4_IRQ_EINT11,
	EXYNOS4_IRQ_EINT12,
	EXYNOS4_IRQ_EINT13,
	EXYNOS4_IRQ_EINT14,
	EXYNOS4_IRQ_EINT15,
};

static unsigned int exynos5_eint0_15_src_int[16] = {
	EXYNOS5_IRQ_EINT0,
	EXYNOS5_IRQ_EINT1,
	EXYNOS5_IRQ_EINT2,
	EXYNOS5_IRQ_EINT3,
	EXYNOS5_IRQ_EINT4,
	EXYNOS5_IRQ_EINT5,
	EXYNOS5_IRQ_EINT6,
	EXYNOS5_IRQ_EINT7,
	EXYNOS5_IRQ_EINT8,
	EXYNOS5_IRQ_EINT9,
	EXYNOS5_IRQ_EINT10,
	EXYNOS5_IRQ_EINT11,
	EXYNOS5_IRQ_EINT12,
	EXYNOS5_IRQ_EINT13,
	EXYNOS5_IRQ_EINT14,
	EXYNOS5_IRQ_EINT15,
};

static inline void exynos_irq_eint_mask(struct irq_data *data)
{
	u32 mask;

	spin_lock(&eint_lock);
	mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
	mask |= EINT_OFFSET_BIT(data->irq);
	__raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
	spin_unlock(&eint_lock);
}

static void exynos_irq_eint_unmask(struct irq_data *data)
{
	u32 mask;

	spin_lock(&eint_lock);
	mask = __raw_readl(EINT_MASK(exynos_eint_base, data->irq));
	mask &= ~(EINT_OFFSET_BIT(data->irq));
	__raw_writel(mask, EINT_MASK(exynos_eint_base, data->irq));
	spin_unlock(&eint_lock);
}

static inline void exynos_irq_eint_ack(struct irq_data *data)
{
	__raw_writel(EINT_OFFSET_BIT(data->irq),
		     EINT_PEND(exynos_eint_base, data->irq));
}

static void exynos_irq_eint_maskack(struct irq_data *data)
{
	exynos_irq_eint_mask(data);
	exynos_irq_eint_ack(data);
}

static int exynos_irq_eint_set_type(struct irq_data *data, unsigned int type)
{
	int offs = EINT_OFFSET(data->irq);
	int shift;
	u32 ctrl, mask;
	u32 newvalue = 0;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		newvalue = S5P_IRQ_TYPE_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		newvalue = S5P_IRQ_TYPE_EDGE_FALLING;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		newvalue = S5P_IRQ_TYPE_EDGE_BOTH;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		newvalue = S5P_IRQ_TYPE_LEVEL_LOW;
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		newvalue = S5P_IRQ_TYPE_LEVEL_HIGH;
		break;

	default:
		printk(KERN_ERR "No such irq type %d\n", type);
		return -EINVAL;
	}

	shift = (offs & 0x7) * 4;
	mask = 0x7 << shift;

	spin_lock(&eint_lock);
	ctrl = __raw_readl(EINT_CON(exynos_eint_base, data->irq));
	ctrl &= ~mask;
	ctrl |= newvalue << shift;
	__raw_writel(ctrl, EINT_CON(exynos_eint_base, data->irq));
	spin_unlock(&eint_lock);

	if (soc_is_exynos5250())
		s3c_gpio_cfgpin(exynos5_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));
	else
		s3c_gpio_cfgpin(exynos4_irq_to_gpio(data->irq), S3C_GPIO_SFN(0xf));

	return 0;
}

static struct irq_chip exynos_irq_eint = {
	.name		= "exynos-eint",
	.irq_mask	= exynos_irq_eint_mask,
	.irq_unmask	= exynos_irq_eint_unmask,
	.irq_mask_ack	= exynos_irq_eint_maskack,
	.irq_ack	= exynos_irq_eint_ack,
	.irq_set_type	= exynos_irq_eint_set_type,
#ifdef CONFIG_PM
	.irq_set_wake	= s3c_irqext_wake,
#endif
};

/*
 * exynos_irq_demux_eint
 *
 * This function demuxes the IRQ from EINTs 16 to 31.
 * It is designed to be inlined into the specific handler
 * s5p_irq_demux_eintX_Y.
 *
 * Each EINT pend/mask register handles eight of them.
 */
static inline void exynos_irq_demux_eint(unsigned int start)
{
	unsigned int irq;

	u32 status = __raw_readl(EINT_PEND(exynos_eint_base, start));
	u32 mask = __raw_readl(EINT_MASK(exynos_eint_base, start));

	status &= ~mask;
	status &= 0xff;

	while (status) {
		irq = fls(status) - 1;
		generic_handle_irq(irq + start);
		status &= ~(1 << irq);
	}
}

static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	exynos_irq_demux_eint(IRQ_EINT(16));
	exynos_irq_demux_eint(IRQ_EINT(24));
	chained_irq_exit(chip, desc);
}

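/*
 * Chained handler for the dedicated EINT0..15 lines: mask/ack the parent
 * interrupt and forward the corresponding IRQ_EINT(x).
 */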
static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
{
	u32 *irq_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);

	chained_irq_enter(chip, desc);
	chip->irq_mask(&desc->irq_data);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	generic_handle_irq(*irq_data);

	chip->irq_unmask(&desc->irq_data);
	chained_irq_exit(chip, desc);
}

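/*
 * Map the GPIO bank that holds the external-interrupt control registers,
 * register the EINT irq_chip for EINT0..31 and install the chained handlers
 * for the dedicated (0..15) and shared (16..31) EINT parent interrupts.
 */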
static int __init exynos_init_irq_eint(void)
{
	int irq;

	if (soc_is_exynos5250())
		exynos_eint_base = ioremap(EXYNOS5_PA_GPIO1, SZ_4K);
	else
		exynos_eint_base = ioremap(EXYNOS4_PA_GPIO2, SZ_4K);

	if (exynos_eint_base == NULL) {
		pr_err("unable to ioremap for EINT base address\n");
		return -ENOMEM;
	}

	for (irq = 0 ; irq <= 31 ; irq++) {
		irq_set_chip_and_handler(IRQ_EINT(irq), &exynos_irq_eint,
					 handle_level_irq);
		set_irq_flags(IRQ_EINT(irq), IRQF_VALID);
	}

	irq_set_chained_handler(EXYNOS_IRQ_EINT16_31, exynos_irq_demux_eint16_31);

	for (irq = 0 ; irq <= 15 ; irq++) {
		eint0_15_data[irq] = IRQ_EINT(irq);

		if (soc_is_exynos5250()) {
			irq_set_handler_data(exynos5_eint0_15_src_int[irq],
					     &eint0_15_data[irq]);
			irq_set_chained_handler(exynos5_eint0_15_src_int[irq],
						exynos_irq_eint0_15);
		} else {
			irq_set_handler_data(exynos4_eint0_15_src_int[irq],
					     &eint0_15_data[irq]);
			irq_set_chained_handler(exynos4_eint0_15_src_int[irq],
						exynos_irq_eint0_15);
		}
	}

	return 0;
}
arch_initcall(exynos_init_irq_eint);