MIPS: Whitespace cleanup.
arch/mips/sgi-ip27/ip27-irq.c
/*
 * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */

#undef DEBUG

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/mipsregs.h>

#include <asm/processor.h>
#include <asm/pci/bridge.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/intr.h>

/*
 * Linux has a controller-independent x86 interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * Various interrupt controllers we handle: 8259 PIC, SMP IO-APIC,
 * PIIX4's internal 8259 PIC and SGI's Visual Workstation Cobalt (IO-)APIC.
 * (IO-APICs assumed to be messaging to Pentium local-APICs)
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

extern asmlinkage void ip27_irq(void);

extern struct bridge_controller *irq_to_bridge[];
extern int irq_to_slot[];

/*
 * Use these macros to look up the bridge controller and the PCI slot
 * an irq was assigned to.
 */
#define IRQ_TO_BRIDGE(i)	irq_to_bridge[(i)]
#define SLOT_FROM_PCI_IRQ(i)	irq_to_slot[i]

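/*
 * Allocate a free hub interrupt level for @cpu and remember which irq
 * it will be used to deliver.
 */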
static inline int alloc_level(int cpu, int irq)
{
	struct hub_data *hub = hub_data(cpu_to_node(cpu));
	struct slice_data *si = cpu_data[cpu].data;
	int level;

	level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
	if (level >= LEVELS_PER_SLICE)
		panic("Cpu %d flooded with devices", cpu);

	__set_bit(level, hub->irq_alloc_mask);
	si->level_to_irq[level] = irq;

	return level;
}

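/*
 * Reverse lookup: scan every online cpu's level_to_irq table to find
 * which cpu and interrupt level an irq was assigned to.
 */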
static inline int find_level(cpuid_t *cpunum, int irq)
{
	int cpu, i;

	for_each_online_cpu(cpu) {
		struct slice_data *si = cpu_data[cpu].data;

		for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
			if (si->level_to_irq[i] == irq) {
				*cpunum = cpu;

				return i;
			}
	}

	panic("Could not identify cpu/level for irq %d", irq);
}

/*
 * Find the index of the most significant bit set
 */
static int ms1bit(unsigned long x)
{
	int b = 0, s;

	s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s;
	s = 8;	if (x >> 8 == 0) s = 0; b += s; x >>= s;
	s = 4;	if (x >> 4 == 0) s = 0; b += s; x >>= s;
	s = 2;	if (x >> 2 == 0) s = 0; b += s; x >>= s;
	s = 1;	if (x >> 1 == 0) s = 0; b += s;

	return b;
}
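
/*
 * Example (illustrative only): for x = 0x4100 bits 8 and 14 are set, so
 * ms1bit(0x4100) returns 14, the index of the highest set bit.
 */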

/*
 * This code is unnecessarily complex, because we do
 * intr enabling. Basically, once we grab the set of intrs we need
 * to service, we must mask _all_ these interrupts; firstly, to make
 * sure the same intr does not intr again, causing recursion that
 * can lead to stack overflow. Secondly, we cannot just mask the
 * one intr we are servicing with do_IRQ, because the non-masked intrs
 * in the first set might intr again, causing multiple servicings of
 * the same intr. This effect is mostly seen for intercpu intrs.
 * Kanoj 05.13.00
 */

static void ip27_do_irq_mask0(void)
{
	int irq, swlevel;
	hubreg_t pend0, mask0;
	cpuid_t cpu = smp_processor_id();
	int pi_int_mask0 =
		(cputoslice(cpu) == 0) ? PI_INT_MASK0_A : PI_INT_MASK0_B;

	/* copied from Irix intpend0() */
	pend0 = LOCAL_HUB_L(PI_INT_PEND0);
	mask0 = LOCAL_HUB_L(pi_int_mask0);

	pend0 &= mask0;		/* Pick intrs we should look at */
	if (!pend0)
		return;

	swlevel = ms1bit(pend0);
#ifdef CONFIG_SMP
	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		smp_call_function_interrupt();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		smp_call_function_interrupt();
	} else
#endif
	{
		/* "map" swlevel to irq */
		struct slice_data *si = cpu_data[cpu].data;

		irq = si->level_to_irq[swlevel];
		do_IRQ(irq);
	}

	LOCAL_HUB_L(PI_INT_PEND0);
}

static void ip27_do_irq_mask1(void)
{
	int irq, swlevel;
	hubreg_t pend1, mask1;
	cpuid_t cpu = smp_processor_id();
	int pi_int_mask1 = (cputoslice(cpu) == 0) ? PI_INT_MASK1_A : PI_INT_MASK1_B;
	struct slice_data *si = cpu_data[cpu].data;

	/* copied from Irix intpend0() */
	pend1 = LOCAL_HUB_L(PI_INT_PEND1);
	mask1 = LOCAL_HUB_L(pi_int_mask1);

	pend1 &= mask1;		/* Pick intrs we should look at */
	if (!pend1)
		return;

	swlevel = ms1bit(pend1);
	/* "map" swlevel to irq */
	irq = si->level_to_irq[swlevel];
	LOCAL_HUB_CLR_INTR(swlevel);
	do_IRQ(irq);

	LOCAL_HUB_L(PI_INT_PEND1);
}

static void ip27_prof_timer(void)
{
	panic("CPU %d got a profiling interrupt", smp_processor_id());
}

static void ip27_hub_error(void)
{
	panic("CPU %d got a hub error interrupt", smp_processor_id());
}

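/*
 * Enable the given interrupt level for @cpu: set the bit in the cpu's
 * cached enable mask and write both mask words out to the A or B set
 * of PI_INT_MASK registers on the cpu's hub, depending on the slice.
 */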
static int intr_connect_level(int cpu, int bit)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	struct slice_data *si = cpu_data[cpu].data;

	set_bit(bit, si->irq_enable_mask);

	if (!cputoslice(cpu)) {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}

	return 0;
}

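/*
 * Counterpart of intr_connect_level(): clear the level bit and write
 * the updated masks back to the hub.
 */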
static int intr_disconnect_level(int cpu, int bit)
{
	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	struct slice_data *si = cpu_data[cpu].data;

	clear_bit(bit, si->irq_enable_mask);

	if (!cputoslice(cpu)) {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
		REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}

	return 0;
}

/* Startup one of the (PCI ...) IRQs routed over a bridge. */
static unsigned int startup_bridge_irq(struct irq_data *d)
{
	struct bridge_controller *bc;
	bridgereg_t device;
	bridge_t *bridge;
	int pin, swlevel;
	cpuid_t cpu;

	pin = SLOT_FROM_PCI_IRQ(d->irq);
	bc = IRQ_TO_BRIDGE(d->irq);
	bridge = bc->base;

	pr_debug("bridge_startup(): irq= 0x%x pin=%d\n", d->irq, pin);
	/*
	 * "map" irq to a swlevel greater than 6 since the first 6 bits
	 * of INT_PEND0 are taken
	 */
	swlevel = find_level(&cpu, d->irq);
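	/*
	 * Note on the encoding below (inferred from the code, not from the
	 * bridge documentation): the low bits carry the hub interrupt level
	 * and bits 8 and up carry the destination nasid; 0x20000 is kept
	 * exactly as the original code programmed it.
	 */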
	bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8));
	bridge->b_int_enable |= (1 << pin);
	bridge->b_int_enable |= 0x7ffffe00;	/* more stuff in int_enable */

	/*
	 * Enable sending of an interrupt clear packet to the hub on a high to
	 * low transition of the interrupt pin.
	 *
	 * IRIX sets additional bits in the address which are documented as
	 * reserved in the bridge docs.
	 */
	bridge->b_int_mode |= (1UL << pin);

	/*
	 * We assume the bridge to have a 1:1 mapping between devices
	 * (slots) and intr pins.
	 */
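	/*
	 * Each device gets a 3-bit field in b_int_device; clear the field
	 * for this pin and program it with the pin number itself.
	 */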
	device = bridge->b_int_device;
	device &= ~(7 << (pin*3));
	device |= (pin << (pin*3));
	bridge->b_int_device = device;

	bridge->b_wid_tflush;

	intr_connect_level(cpu, swlevel);

	return 0;	/* Never anything pending.  */
}

/* Shutdown one of the (PCI ...) IRQs routed over a bridge. */
static void shutdown_bridge_irq(struct irq_data *d)
{
	struct bridge_controller *bc = IRQ_TO_BRIDGE(d->irq);
	bridge_t *bridge = bc->base;
	int pin, swlevel;
	cpuid_t cpu;

	pr_debug("bridge_shutdown: irq 0x%x\n", d->irq);
	pin = SLOT_FROM_PCI_IRQ(d->irq);

	/*
	 * map irq to a swlevel greater than 6 since the first 6 bits
	 * of INT_PEND0 are taken
	 */
	swlevel = find_level(&cpu, d->irq);
	intr_disconnect_level(cpu, swlevel);

	bridge->b_int_enable &= ~(1 << pin);
	bridge->b_wid_tflush;
}

static inline void enable_bridge_irq(struct irq_data *d)
{
	cpuid_t cpu;
	int swlevel;

	swlevel = find_level(&cpu, d->irq);	/* Criminal offence */
	intr_connect_level(cpu, swlevel);
}

static inline void disable_bridge_irq(struct irq_data *d)
{
	cpuid_t cpu;
	int swlevel;

	swlevel = find_level(&cpu, d->irq);	/* Criminal offence */
	intr_disconnect_level(cpu, swlevel);
}

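/*
 * irq_chip for bridge routed PCI interrupts: mask/unmask toggle the
 * level in the hub mask registers, startup/shutdown additionally
 * program the bridge itself.
 */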
static struct irq_chip bridge_irq_type = {
	.name		= "bridge",
	.irq_startup	= startup_bridge_irq,
	.irq_shutdown	= shutdown_bridge_irq,
	.irq_mask	= disable_bridge_irq,
	.irq_unmask	= enable_bridge_irq,
};

void register_bridge_irq(unsigned int irq)
{
	irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq);
}

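/*
 * Allocate an irq number and a hub interrupt level for a bridge, make
 * sure the level is not already pending, enable it and register the
 * bridge irq_chip.  Returns the irq number or a negative error code.
 */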
int request_bridge_irq(struct bridge_controller *bc)
{
	int irq = allocate_irqno();
	int swlevel, cpu;
	nasid_t nasid;

	if (irq < 0)
		return irq;

	/*
	 * "map" irq to a swlevel greater than 6 since the first 6 bits
	 * of INT_PEND0 are taken
	 */
	cpu = bc->irq_cpu;
	swlevel = alloc_level(cpu, irq);
	if (unlikely(swlevel < 0)) {
		free_irqno(irq);

		return -EAGAIN;
	}

	/* Make sure it's not already pending when we connect it. */
	nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
	REMOTE_HUB_CLR_INTR(nasid, swlevel);

	intr_connect_level(cpu, swlevel);

	register_bridge_irq(irq);

	return irq;
}

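/*
 * Top-level interrupt dispatch: IP4 is the RT timer, IP2 fans out to
 * PI_INT_PEND0, IP3 to PI_INT_PEND1, IP5 is the profiling timer and
 * IP6 signals a hub error.
 */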
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long pending = read_c0_cause() & read_c0_status();
	extern unsigned int rt_timer_irq;

	if (pending & CAUSEF_IP4)
		do_IRQ(rt_timer_irq);
	else if (pending & CAUSEF_IP2)	/* PI_INT_PEND_0 or CC_PEND_{A|B} */
		ip27_do_irq_mask0();
	else if (pending & CAUSEF_IP3)	/* PI_INT_PEND_1 */
		ip27_do_irq_mask1();
	else if (pending & CAUSEF_IP5)
		ip27_prof_timer();
	else if (pending & CAUSEF_IP6)
		ip27_hub_error();
}

void __init arch_init_irq(void)
{
}

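/*
 * Reserve and enable the per-slice resched and call IPI levels on the
 * local hub for the calling cpu.
 */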
void install_ipi(void)
{
	int slice = LOCAL_HUB_L(PI_CPU_NUM);
	int cpu = smp_processor_id();
	struct slice_data *si = cpu_data[cpu].data;
	struct hub_data *hub = hub_data(cpu_to_node(cpu));
	int resched, call;

	resched = CPU_RESCHED_A_IRQ + slice;
	__set_bit(resched, hub->irq_alloc_mask);
	__set_bit(resched, si->irq_enable_mask);
	LOCAL_HUB_CLR_INTR(resched);

	call = CPU_CALL_A_IRQ + slice;
	__set_bit(call, hub->irq_alloc_mask);
	__set_bit(call, si->irq_enable_mask);
	LOCAL_HUB_CLR_INTR(call);

	if (slice == 0) {
		LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}
}