CRISv32: add irq domains support
[deliverable/linux.git] / arch / cris / arch-v32 / kernel / irq.c
CommitLineData
51533b61
MS
1/*
2 * Copyright (C) 2003, Axis Communications AB.
3 */
4
5#include <asm/irq.h>
6#include <linux/irq.h>
7#include <linux/interrupt.h>
8#include <linux/smp.h>
51533b61
MS
9#include <linux/kernel.h>
10#include <linux/errno.h>
11#include <linux/init.h>
12#include <linux/profile.h>
43f7071e
RV
13#include <linux/of.h>
14#include <linux/of_irq.h>
51533b61
MS
15#include <linux/proc_fs.h>
16#include <linux/seq_file.h>
17#include <linux/threads.h>
18#include <linux/spinlock.h>
19#include <linux/kernel_stat.h>
693d9847
JN
20#include <hwregs/reg_map.h>
21#include <hwregs/reg_rdwr.h>
22#include <hwregs/intr_vect.h>
23#include <hwregs/intr_vect_defs.h>
51533b61
MS
24
/* Sentinel for irq_allocations[].cpu: the IRQ is pinned to the local CPU. */
#define CPU_FIXED -1

/* IRQ masks (refer to comment for crisv32_do_multiple).
 * The interrupt controller exposes IRQs in 32-bit registers; if the timer
 * vector falls in the second register, TIMER_VECT1 is defined and
 * TIMER_MASK is the timer's bit position within that second register. */
#if TIMER0_INTR_VECT - FIRST_IRQ < 32
#define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ))
#undef TIMER_VECT1
#else
#define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ - 32))
#define TIMER_VECT1
#endif
51533b61
MS
/* When KGDB runs on a serial port, that port's IRQ must not be handled
 * as part of a multiple interrupt (see crisv32_do_multiple); IGNOREMASK
 * is the bit for the KGDB port's interrupt vector.
 * BUGFIX: the PORT2 test previously read CONFIG_ETRAX_KGB_PORT2 (missing
 * 'D'), so IGNOREMASK could never be defined for port 2. */
#ifdef CONFIG_ETRAX_KGDB
#if defined(CONFIG_ETRAX_KGDB_PORT0)
#define IGNOREMASK (1 << (SER0_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT1)
#define IGNOREMASK (1 << (SER1_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT2)
#define IGNOREMASK (1 << (SER2_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT3)
#define IGNOREMASK (1 << (SER3_INTR_VECT - FIRST_IRQ))
#endif
#endif
46
/* Protects the interrupt controller mask registers and irq_allocations[]. */
DEFINE_SPINLOCK(irq_lock);

/* Per-IRQ bookkeeping used to distribute interrupts among CPUs. */
struct cris_irq_allocation
{
	int cpu; /* The CPU to which the IRQ is currently allocated. */
	cpumask_t mask; /* The CPUs to which the IRQ may be allocated. */
};
54
693d9847
JN
/* Every IRQ starts out allocated to CPU 0 but allowed on any CPU. */
struct cris_irq_allocation irq_allocations[NR_REAL_IRQS] =
	{ [0 ... NR_REAL_IRQS - 1] = {0, CPU_MASK_ALL} };

/* Base address of each CPU's interrupt controller register bank. */
static unsigned long irq_regs[NR_CPUS] =
{
	regi_irq,
#ifdef CONFIG_SMP
	regi_irq2,
#endif
};

/* Number of 32-bit mask/vector registers needed to cover all real IRQs. */
#if NR_REAL_IRQS > 32
#define NBR_REGS 2
#else
#define NBR_REGS 1
#endif

/* Interrupt statistics, per CPU and per IRQ. */
unsigned long cpu_irq_counters[NR_CPUS];
unsigned long irq_counters[NR_REAL_IRQS];
74
75/* From irq.c. */
76extern void weird_irq(void);
77
78/* From entry.S. */
79extern void system_call(void);
80extern void nmi_interrupt(void);
81extern void multiple_interrupt(void);
82extern void gdb_handle_exception(void);
83extern void i_mmu_refill(void);
84extern void i_mmu_invalid(void);
85extern void i_mmu_access(void);
86extern void i_mmu_execute(void);
87extern void d_mmu_refill(void);
88extern void d_mmu_invalid(void);
89extern void d_mmu_access(void);
90extern void d_mmu_write(void);
91
92/* From kgdb.c. */
93extern void kgdb_init(void);
94extern void breakpoint(void);
95
693d9847
JN
96/* From traps.c. */
97extern void breakh_BUG(void);
98
/*
 * Build the IRQ handler stubs using macros from irq.h.
 *
 * Vectors 0x31-0x50 always exist; 0x51-0x70 are built only when the
 * machine has more than 32 interrupts.  The timer vector (0x31 on
 * ARTPEC-3, 0x4b on ETRAX FS) uses BUILD_TIMER_IRQ — presumably a stub
 * variant that does not block the timer IRQ, to avoid watchdog
 * starvation (see crisv32_do_multiple); confirm in irq.h.
 */
#ifdef CONFIG_CRIS_MACH_ARTPEC3
BUILD_TIMER_IRQ(0x31, 0)
#else
BUILD_IRQ(0x31)
#endif
BUILD_IRQ(0x32)
BUILD_IRQ(0x33)
BUILD_IRQ(0x34)
BUILD_IRQ(0x35)
BUILD_IRQ(0x36)
BUILD_IRQ(0x37)
BUILD_IRQ(0x38)
BUILD_IRQ(0x39)
BUILD_IRQ(0x3a)
BUILD_IRQ(0x3b)
BUILD_IRQ(0x3c)
BUILD_IRQ(0x3d)
BUILD_IRQ(0x3e)
BUILD_IRQ(0x3f)
BUILD_IRQ(0x40)
BUILD_IRQ(0x41)
BUILD_IRQ(0x42)
BUILD_IRQ(0x43)
BUILD_IRQ(0x44)
BUILD_IRQ(0x45)
BUILD_IRQ(0x46)
BUILD_IRQ(0x47)
BUILD_IRQ(0x48)
BUILD_IRQ(0x49)
BUILD_IRQ(0x4a)
#ifdef CONFIG_ETRAXFS
BUILD_TIMER_IRQ(0x4b, 0)
#else
BUILD_IRQ(0x4b)
#endif
BUILD_IRQ(0x4c)
BUILD_IRQ(0x4d)
BUILD_IRQ(0x4e)
BUILD_IRQ(0x4f)
BUILD_IRQ(0x50)
#if MACH_IRQS > 32
BUILD_IRQ(0x51)
BUILD_IRQ(0x52)
BUILD_IRQ(0x53)
BUILD_IRQ(0x54)
BUILD_IRQ(0x55)
BUILD_IRQ(0x56)
BUILD_IRQ(0x57)
BUILD_IRQ(0x58)
BUILD_IRQ(0x59)
BUILD_IRQ(0x5a)
BUILD_IRQ(0x5b)
BUILD_IRQ(0x5c)
BUILD_IRQ(0x5d)
BUILD_IRQ(0x5e)
BUILD_IRQ(0x5f)
BUILD_IRQ(0x60)
BUILD_IRQ(0x61)
BUILD_IRQ(0x62)
BUILD_IRQ(0x63)
BUILD_IRQ(0x64)
BUILD_IRQ(0x65)
BUILD_IRQ(0x66)
BUILD_IRQ(0x67)
BUILD_IRQ(0x68)
BUILD_IRQ(0x69)
BUILD_IRQ(0x6a)
BUILD_IRQ(0x6b)
BUILD_IRQ(0x6c)
BUILD_IRQ(0x6d)
BUILD_IRQ(0x6e)
BUILD_IRQ(0x6f)
BUILD_IRQ(0x70)
#endif
51533b61
MS
176
/* Pointers to the low-level handlers generated by BUILD_IRQ above,
 * indexed by (vector - FIRST_IRQ); installed into the exception vector
 * table by init_IRQ(). */
static void (*interrupt[MACH_IRQS])(void) = {
	IRQ0x31_interrupt, IRQ0x32_interrupt, IRQ0x33_interrupt,
	IRQ0x34_interrupt, IRQ0x35_interrupt, IRQ0x36_interrupt,
	IRQ0x37_interrupt, IRQ0x38_interrupt, IRQ0x39_interrupt,
	IRQ0x3a_interrupt, IRQ0x3b_interrupt, IRQ0x3c_interrupt,
	IRQ0x3d_interrupt, IRQ0x3e_interrupt, IRQ0x3f_interrupt,
	IRQ0x40_interrupt, IRQ0x41_interrupt, IRQ0x42_interrupt,
	IRQ0x43_interrupt, IRQ0x44_interrupt, IRQ0x45_interrupt,
	IRQ0x46_interrupt, IRQ0x47_interrupt, IRQ0x48_interrupt,
	IRQ0x49_interrupt, IRQ0x4a_interrupt, IRQ0x4b_interrupt,
	IRQ0x4c_interrupt, IRQ0x4d_interrupt, IRQ0x4e_interrupt,
	IRQ0x4f_interrupt, IRQ0x50_interrupt,
#if MACH_IRQS > 32
	IRQ0x51_interrupt, IRQ0x52_interrupt, IRQ0x53_interrupt,
	IRQ0x54_interrupt, IRQ0x55_interrupt, IRQ0x56_interrupt,
	IRQ0x57_interrupt, IRQ0x58_interrupt, IRQ0x59_interrupt,
	IRQ0x5a_interrupt, IRQ0x5b_interrupt, IRQ0x5c_interrupt,
	IRQ0x5d_interrupt, IRQ0x5e_interrupt, IRQ0x5f_interrupt,
	IRQ0x60_interrupt, IRQ0x61_interrupt, IRQ0x62_interrupt,
	IRQ0x63_interrupt, IRQ0x64_interrupt, IRQ0x65_interrupt,
	IRQ0x66_interrupt, IRQ0x67_interrupt, IRQ0x68_interrupt,
	IRQ0x69_interrupt, IRQ0x6a_interrupt, IRQ0x6b_interrupt,
	IRQ0x6c_interrupt, IRQ0x6d_interrupt, IRQ0x6e_interrupt,
	IRQ0x6f_interrupt, IRQ0x70_interrupt,
#endif
};
204
205void
206block_irq(int irq, int cpu)
207{
208 int intr_mask;
209 unsigned long flags;
210
693d9847 211 spin_lock_irqsave(&irq_lock, flags);
e75a320e
JN
212 /* Remember, 1 let thru, 0 block. */
213 if (irq - FIRST_IRQ < 32) {
693d9847
JN
214 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
215 rw_mask, 0);
693d9847 216 intr_mask &= ~(1 << (irq - FIRST_IRQ));
693d9847
JN
217 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
218 0, intr_mask);
e75a320e
JN
219 } else {
220 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
221 rw_mask, 1);
222 intr_mask &= ~(1 << (irq - FIRST_IRQ - 32));
693d9847
JN
223 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
224 1, intr_mask);
e75a320e 225 }
51533b61
MS
226 spin_unlock_irqrestore(&irq_lock, flags);
227}
228
229void
230unblock_irq(int irq, int cpu)
231{
232 int intr_mask;
233 unsigned long flags;
234
235 spin_lock_irqsave(&irq_lock, flags);
e75a320e
JN
236 /* Remember, 1 let thru, 0 block. */
237 if (irq - FIRST_IRQ < 32) {
693d9847
JN
238 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
239 rw_mask, 0);
693d9847 240 intr_mask |= (1 << (irq - FIRST_IRQ));
693d9847
JN
241 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
242 0, intr_mask);
e75a320e
JN
243 } else {
244 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
245 rw_mask, 1);
246 intr_mask |= (1 << (irq - FIRST_IRQ - 32));
693d9847
JN
247 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
248 1, intr_mask);
e75a320e 249 }
51533b61
MS
250 spin_unlock_irqrestore(&irq_lock, flags);
251}
252
253/* Find out which CPU the irq should be allocated to. */
254static int irq_cpu(int irq)
255{
256 int cpu;
257 unsigned long flags;
258
259 spin_lock_irqsave(&irq_lock, flags);
260 cpu = irq_allocations[irq - FIRST_IRQ].cpu;
261
262 /* Fixed interrupts stay on the local CPU. */
263 if (cpu == CPU_FIXED)
264 {
265 spin_unlock_irqrestore(&irq_lock, flags);
266 return smp_processor_id();
267 }
268
269
270 /* Let the interrupt stay if possible */
8aebe21e 271 if (cpumask_test_cpu(cpu, &irq_allocations[irq - FIRST_IRQ].mask))
51533b61
MS
272 goto out;
273
274 /* IRQ must be moved to another CPU. */
8aebe21e 275 cpu = cpumask_first(&irq_allocations[irq - FIRST_IRQ].mask);
51533b61
MS
276 irq_allocations[irq - FIRST_IRQ].cpu = cpu;
277out:
278 spin_unlock_irqrestore(&irq_lock, flags);
279 return cpu;
280}
281
4150764f 282void crisv32_mask_irq(int irq)
51533b61
MS
283{
284 int cpu;
285
286 for (cpu = 0; cpu < NR_CPUS; cpu++)
287 block_irq(irq, cpu);
288}
289
/* Unmask @irq on whichever CPU irq_cpu() allocates it to. */
void crisv32_unmask_irq(int irq)
{
	int cpu = irq_cpu(irq);

	unblock_irq(irq, cpu);
}
294
295
9af7503d 296static void enable_crisv32_irq(struct irq_data *data)
51533b61 297{
9af7503d 298 crisv32_unmask_irq(data->irq);
51533b61
MS
299}
300
9af7503d 301static void disable_crisv32_irq(struct irq_data *data)
51533b61 302{
9af7503d 303 crisv32_mask_irq(data->irq);
51533b61
MS
304}
305
9af7503d
TG
306static int set_affinity_crisv32_irq(struct irq_data *data,
307 const struct cpumask *dest, bool force)
51533b61
MS
308{
309 unsigned long flags;
9af7503d 310
51533b61 311 spin_lock_irqsave(&irq_lock, flags);
9af7503d 312 irq_allocations[data->irq - FIRST_IRQ].mask = *dest;
51533b61 313 spin_unlock_irqrestore(&irq_lock, flags);
d5dedd45 314 return 0;
51533b61
MS
315}
316
/* The irq_chip for the CRISv32 interrupt controller.
 * NOTE(review): no .irq_mask/.irq_unmask callbacks are provided; masking
 * is done only through .irq_enable/.irq_disable — confirm this matches
 * what the handle_simple_irq flow handler expects. */
static struct irq_chip crisv32_irq_type = {
	.name = "CRISv32",
	.irq_shutdown = disable_crisv32_irq,
	.irq_enable = enable_crisv32_irq,
	.irq_disable = disable_crisv32_irq,
	.irq_set_affinity = set_affinity_crisv32_irq,
};
324
325void
326set_exception_vector(int n, irqvectptr addr)
327{
328 etrax_irv->v[n] = (irqvectptr) addr;
329}
330
331extern void do_IRQ(int irq, struct pt_regs * regs);
332
/* Dispatch a single IRQ.  If @block is set, the IRQ is masked on the
 * local CPU for the duration of the handler and then unmasked on the
 * CPU chosen by irq_cpu() (which may differ if affinity changed). */
void
crisv32_do_IRQ(int irq, int block, struct pt_regs* regs)
{
	/* Interrupts that may not be moved to another CPU may
	 * skip blocking. This is currently only valid for the
	 * timer IRQ and the IPI and is used for the timer
	 * interrupt to avoid watchdog starvation.
	 */
	if (!block) {
		do_IRQ(irq, regs);
		return;
	}

	block_irq(irq, smp_processor_id());
	do_IRQ(irq, regs);

	/* Re-enable on the (possibly re-allocated) target CPU. */
	unblock_irq(irq, irq_cpu(irq));
}
351
352/* If multiple interrupts occur simultaneously we get a multiple
353 * interrupt from the CPU and software has to sort out which
354 * interrupts that happened. There are two special cases here:
355 *
356 * 1. Timer interrupts may never be blocked because of the
357 * watchdog (refer to comment in include/asr/arch/irq.h)
358 * 2. GDB serial port IRQs are unhandled here and will be handled
359 * as a single IRQ when it strikes again because the GDB
360 * stubb wants to save the registers in its own fashion.
361 */
362void
363crisv32_do_multiple(struct pt_regs* regs)
364{
365 int cpu;
366 int mask;
693d9847 367 int masked[NBR_REGS];
51533b61 368 int bit;
693d9847 369 int i;
51533b61
MS
370
371 cpu = smp_processor_id();
372
373 /* An extra irq_enter here to prevent softIRQs to run after
374 * each do_IRQ. This will decrease the interrupt latency.
375 */
376 irq_enter();
377
693d9847 378 for (i = 0; i < NBR_REGS; i++) {
25985edc 379 /* Get which IRQs that happened. */
693d9847
JN
380 masked[i] = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
381 r_masked_vect, i);
51533b61 382
693d9847
JN
383 /* Calculate new IRQ mask with these IRQs disabled. */
384 mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i);
385 mask &= ~masked[i];
51533b61
MS
386
387 /* Timer IRQ is never masked */
693d9847
JN
388#ifdef TIMER_VECT1
389 if ((i == 1) && (masked[0] & TIMER_MASK))
390 mask |= TIMER_MASK;
391#else
392 if ((i == 0) && (masked[0] & TIMER_MASK))
393 mask |= TIMER_MASK;
394#endif
395 /* Block all the IRQs */
396 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask);
51533b61
MS
397
398 /* Check for timer IRQ and handle it special. */
693d9847
JN
399#ifdef TIMER_VECT1
400 if ((i == 1) && (masked[i] & TIMER_MASK)) {
401 masked[i] &= ~TIMER_MASK;
402 do_IRQ(TIMER0_INTR_VECT, regs);
403 }
404#else
405 if ((i == 0) && (masked[i] & TIMER_MASK)) {
406 masked[i] &= ~TIMER_MASK;
407 do_IRQ(TIMER0_INTR_VECT, regs);
408 }
693d9847 409#endif
768c3149 410 }
51533b61
MS
411
412#ifdef IGNORE_MASK
413 /* Remove IRQs that can't be handled as multiple. */
693d9847 414 masked[0] &= ~IGNORE_MASK;
51533b61
MS
415#endif
416
417 /* Handle the rest of the IRQs. */
693d9847
JN
418 for (i = 0; i < NBR_REGS; i++) {
419 for (bit = 0; bit < 32; bit++) {
420 if (masked[i] & (1 << bit))
421 do_IRQ(bit + FIRST_IRQ + i*32, regs);
422 }
51533b61
MS
423 }
424
425 /* Unblock all the IRQs. */
693d9847
JN
426 for (i = 0; i < NBR_REGS; i++) {
427 mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i);
428 mask |= masked[i];
429 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask);
430 }
51533b61
MS
431
432 /* This irq_exit() will trigger the soft IRQs. */
433 irq_exit();
434}
435
43f7071e
RV
436static int crisv32_irq_map(struct irq_domain *h, unsigned int virq,
437 irq_hw_number_t hw_irq_num)
438{
439 irq_set_chip_and_handler(virq, &crisv32_irq_type, handle_simple_irq);
440
441 return 0;
442}
443
444static struct irq_domain_ops crisv32_irq_ops = {
445 .map = crisv32_irq_map,
446 .xlate = irq_domain_xlate_onecell,
447};
448
51533b61
MS
/*
 * This is called by start_kernel. It fixes the IRQ masks and setup the
 * interrupt vector table to point to bad_interrupt pointers.
 */
void __init
init_IRQ(void)
{
	int i;
	int j;
	reg_intr_vect_rw_mask vect_mask = {0};
	struct device_node *np;
	struct irq_domain *domain;

	/* Clear all interrupts masks. */
	for (i = 0; i < NBR_REGS; i++)
		REG_WR_VECT(intr_vect, regi_irq, rw_mask, i, vect_mask);

	/* Start with every vector pointing at the catch-all handler. */
	for (i = 0; i < 256; i++)
		etrax_irv->v[i] = weird_irq;

	/* Register a legacy IRQ domain for the interrupt controller node
	 * so hwirq FIRST_IRQ..NR_IRQS-1 map 1:1 onto the same virqs. */
	np = of_find_compatible_node(NULL, NULL, "axis,crisv32-intc");
	domain = irq_domain_add_legacy(np, NR_IRQS - FIRST_IRQ,
				       FIRST_IRQ, FIRST_IRQ,
				       &crisv32_irq_ops, NULL);
	BUG_ON(!domain);
	irq_set_default_host(domain);
	of_node_put(np);

	/* Point each external IRQ vector at its generated stub.
	 * NOTE(review): interrupt[] has MACH_IRQS entries while this loop
	 * runs NR_IRQS times — confirm NR_IRQS <= MACH_IRQS. */
	for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
		set_exception_vector(i, interrupt[j]);
	}

	/* Mark Timer and IPI IRQs as CPU local */
	irq_allocations[TIMER0_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
	irq_set_status_flags(TIMER0_INTR_VECT, IRQ_PER_CPU);
	irq_allocations[IPI_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
	irq_set_status_flags(IPI_INTR_VECT, IRQ_PER_CPU);

	set_exception_vector(0x00, nmi_interrupt);
	set_exception_vector(0x30, multiple_interrupt);

	/* Set up handler for various MMU bus faults. */
	set_exception_vector(0x04, i_mmu_refill);
	set_exception_vector(0x05, i_mmu_invalid);
	set_exception_vector(0x06, i_mmu_access);
	set_exception_vector(0x07, i_mmu_execute);
	set_exception_vector(0x08, d_mmu_refill);
	set_exception_vector(0x09, d_mmu_invalid);
	set_exception_vector(0x0a, d_mmu_access);
	set_exception_vector(0x0b, d_mmu_write);

#ifdef CONFIG_BUG
	/* Break 14 handler, used to implement cheap BUG(). */
	set_exception_vector(0x1e, breakh_BUG);
#endif

	/* The system-call trap is reached by "break 13". */
	set_exception_vector(0x1d, system_call);

	/* Exception handlers for debugging, both user-mode and kernel-mode. */

	/* Break 8. */
	set_exception_vector(0x18, gdb_handle_exception);
	/* Hardware single step. */
	set_exception_vector(0x3, gdb_handle_exception);
	/* Hardware breakpoint. */
	set_exception_vector(0xc, gdb_handle_exception);

#ifdef CONFIG_ETRAX_KGDB
	kgdb_init();
	/* Everything is set up; now trap the kernel. */
	breakpoint();
#endif
}
523
This page took 0.662726 seconds and 5 git commands to generate.