/*
 * Interrupt Entries
 *
 * Copyright 2005-2009 Analog Devices Inc.
 *              D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
 *              Kenneth Albanowski <kjahds@kjahds.com>
 *
 * Licensed under the GPL-2 or later.
 */

#include <asm/blackfin.h>
#include <mach/irq.h>
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/trace.h>
#include <asm/traps.h>
#include <asm/thread_info.h>

#include <asm/context.S>

.extern _ret_from_exception

/* Optionally place the interrupt entry code in fast on-chip L1 SRAM. */
#ifdef CONFIG_I_ENTRY_L1
.section .l1.text
#else
.text
#endif

.align 4	/* just in case */
/* Common interrupt entry code.  First we do CLI, then push
 * RETI, to keep interrupts disabled, but to allow this state to be changed
 * by local_bh_enable.
 * R0 contains the interrupt number, while R1 may contain the value of IPEND,
 * or garbage if IPEND won't be needed by the ISR.
 *
 * Builds a full pt_regs frame on the kernel stack (index, modify, length
 * and base DAG registers, accumulators, loop registers, ASTAT, return
 * registers, SEQSTAT, IPEND), then dispatches to _do_irq (or the I-pipe
 * IRQ grabber) with R0 = irq number and R1 = pt_regs pointer.
 */
__common_int_entry:
	[--sp] = fp;
	[--sp] = usp;

	[--sp] = i0;
	[--sp] = i1;
	[--sp] = i2;
	[--sp] = i3;

	[--sp] = m0;
	[--sp] = m1;
	[--sp] = m2;
	[--sp] = m3;

	[--sp] = l0;
	[--sp] = l1;
	[--sp] = l2;
	[--sp] = l3;

	[--sp] = b0;
	[--sp] = b1;
	[--sp] = b2;
	[--sp] = b3;
	[--sp] = a0.x;
	[--sp] = a0.w;
	[--sp] = a1.x;
	[--sp] = a1.w;

	[--sp] = LC0;
	[--sp] = LC1;
	[--sp] = LT0;
	[--sp] = LT1;
	[--sp] = LB0;
	[--sp] = LB1;

	[--sp] = ASTAT;

	[--sp] = r0;	/* Skip reserved */
	[--sp] = RETS;
	r2 = RETI;
	[--sp] = r2;
	[--sp] = RETX;
	[--sp] = RETN;
	[--sp] = RETE;
	[--sp] = SEQSTAT;
	[--sp] = r1;	/* IPEND - R1 may or may not be set up before jumping here. */

	/* Switch to other method of keeping interrupts disabled. */
#ifdef CONFIG_DEBUG_HWERR
	r1 = 0x3f;	/* keep EMU/RST/NMI/EVX/IVHW unmasked to catch HW errors */
	sti r1;
#else
	cli r1;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	/* R0 (irq number) is caller-saved across the C call - preserve it. */
	[--sp] = r0;
	sp += -12;
	call _trace_hardirqs_off;
	sp += 12;
	r0 = [sp++];
#endif
	[--sp] = RETI;	/* orig_pc */
	/* Clear all L registers so C code never runs with HW loops armed. */
	r1 = 0 (x);
	l0 = r1;
	l1 = r1;
	l2 = r1;
	l3 = r1;
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

	ANOMALY_283_315_WORKAROUND(p5, r7)

	r1 = sp;	/* second C argument: pt_regs pointer */
	SP += -12;	/* outgoing-argument space per Blackfin C ABI */
#ifdef CONFIG_IPIPE
	call ___ipipe_grab_irq
	SP += 12;
	cc = r0 == 0;	/* zero means the IRQ was not for the root domain */
	if cc jump .Lcommon_restore_context;
#else /* CONFIG_IPIPE */
	pseudo_long_call _do_irq, p2;
	SP += 12;
#endif /* CONFIG_IPIPE */
	pseudo_long_call _return_from_int, p2;
.Lcommon_restore_context:
	RESTORE_CONTEXT
	rti;
/* interrupt routine for ivhw - 5 */
ENTRY(_evt_ivhw)
	/* In case a single action kicks off multiple memory transactions (like
	 * a cache line fetch) - this can cause multiple hardware errors; let's
	 * catch them all.  First - make sure all the actions are complete, and
	 * the core sees the hardware errors.
	 */
	SSYNC;
	SSYNC;

	SAVE_ALL_SYS
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif

	ANOMALY_283_315_WORKAROUND(p5, r7)

	/* Handle all stacked hardware errors.
	 * To make sure we don't hang forever, only do it 10 times.
	 * R0 = iteration counter, R2 = iteration limit.
	 */
	R0 = 0;
	R2 = 10;
1:
	P0.L = LO(ILAT);
	P0.H = HI(ILAT);
	R1 = [P0];
	CC = BITTST(R1, EVT_IVHW_P);
	IF ! CC JUMP 2f;
	/* OK a hardware error is pending - clear it */
	/* NOTE(review): this writes EVT_IVHW_P (the bit *position*) to ILAT;
	 * the mask form (EVT_IVHW) may be what is intended - confirm against
	 * the Blackfin HRM's ILAT write-one-to-clear semantics.
	 */
	R1 = EVT_IVHW_P;
	[P0] = R1;
	R0 += 1;
	/* Bound the loop: compare the counter (R0) against the limit (R2).
	 * (Previously compared R1, which never equals 10, making the
	 * "only do it 10 times" bound dead code.)
	 */
	CC = R0 == R2;
	if CC JUMP 2f;
	JUMP 1b;
2:
	# We are going to dump something out, so make sure we print IPEND properly
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	r0 = [p2];
	[sp + PT_IPEND] = r0;

	/* set the EXCAUSE to HWERR for trap_c */
	r0 = [sp + PT_SEQSTAT];
	R1.L = LO(VEC_HWERR);
	R1.H = HI(VEC_HWERR);
	R0 = R0 | R1;
	[sp + PT_SEQSTAT] = R0;

	r0 = sp;	/* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	pseudo_long_call _trap_c, p5;
	SP += 12;

#ifdef EBIU_ERRMST
	/* make sure EBIU_ERRMST is clear */
	p0.l = LO(EBIU_ERRMST);
	p0.h = HI(EBIU_ERRMST);
	r0.l = (CORE_ERROR | CORE_MERROR);
	w[p0] = r0.l;
#endif

	pseudo_long_call _ret_from_exception, p2;

.Lcommon_restore_all_sys:
	RESTORE_ALL_SYS
	rti;
ENDPROC(_evt_ivhw)
f0b5d12f 196/* Interrupt routine for evt2 (NMI).
f0b5d12f 197 * For inner circle type details, please see:
e48df47c 198 * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
f0b5d12f
MF
199 */
200ENTRY(_evt_nmi)
60ffdb36 201#ifndef CONFIG_NMI_WATCHDOG
f0b5d12f 202.weak _evt_nmi
60ffdb36
GY
203#else
204 /* Not take account of CPLBs, this handler will not return */
205 SAVE_ALL_SYS
206 r0 = sp;
207 r1 = retn;
208 [sp + PT_PC] = r1;
209 trace_buffer_save(p4,r5);
210
211 ANOMALY_283_315_WORKAROUND(p4, r5)
212
213 SP += -12;
214 call _do_nmi;
215 SP += 12;
2161:
217 jump 1b;
218#endif
1394f032 219 rtn;
f0b5d12f 220ENDPROC(_evt_nmi)
/* interrupt routine for core timer - 6 */
ENTRY(_evt_timer)
	TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)

/* interrupt routines for evt7 through evt13 (IVG7-IVG13); each macro
 * expansion loads the IVG number and falls into __common_int_entry. */
ENTRY(_evt_evt7)
	INTERRUPT_ENTRY(EVT_IVG7_P)
ENTRY(_evt_evt8)
	INTERRUPT_ENTRY(EVT_IVG8_P)
ENTRY(_evt_evt9)
	INTERRUPT_ENTRY(EVT_IVG9_P)
ENTRY(_evt_evt10)
	INTERRUPT_ENTRY(EVT_IVG10_P)
ENTRY(_evt_evt11)
	INTERRUPT_ENTRY(EVT_IVG11_P)
ENTRY(_evt_evt12)
	INTERRUPT_ENTRY(EVT_IVG12_P)
ENTRY(_evt_evt13)
	INTERRUPT_ENTRY(EVT_IVG13_P)
/* interrupt routine for system_call - 15 */
ENTRY(_evt_system_call)
	SAVE_CONTEXT_SYSCALL
#ifdef CONFIG_FRAME_POINTER
	fp = 0;
#endif
	pseudo_long_call _system_call, p2;
	jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)
#ifdef CONFIG_IPIPE
/*
 * __ipipe_call_irqtail: lowers the current priority level to EVT15
 * before running a user-defined routine, then raises the priority
 * level to EVT14 to prepare the caller for a normal interrupt
 * return through RTI.
 *
 * We currently use this facility in two occasions:
 *
 * - to branch to __ipipe_irq_tail_hook as requested by a high
 *   priority domain after the pipeline delivered an interrupt,
 *   e.g. such as Xenomai, in order to start its rescheduling
 *   procedure, since we may not switch tasks when IRQ levels are
 *   nested on the Blackfin, so we have to fake an interrupt return
 *   so that we may reschedule immediately.
 *
 * - to branch to sync_root_irqs, in order to play any interrupt
 *   pending for the root domain (i.e. the Linux kernel). This lowers
 *   the core priority level enough so that Linux IRQ handlers may
 *   never delay interrupts handled by high priority domains; we defer
 *   those handlers until this point instead. This is a substitute
 *   to using a threaded interrupt model for the Linux kernel.
 *
 * r0: address of user-defined routine
 * context: caller must have preempted EVT15, hw interrupts must be off.
 */
ENTRY(___ipipe_call_irqtail)
	p0 = r0;		/* stash routine address before r0 is reused */
	/* Drop to EVT15 by returning (rti) to the local label below. */
	r0.l = 1f;
	r0.h = 1f;
	reti = r0;
	rti;
1:
	[--sp] = rets;
	[--sp] = ( r7:4, p5:3 );	/* preserve callee-saved regs */
	sp += -12;
	call (p0);
	sp += 12;
	( r7:4, p5:3 ) = [sp++];
	rets = [sp++];

#ifdef CONFIG_DEBUG_HWERR
	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | EVT_IVHW | \
	      EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#else
	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
	r0 = (EVT_IVG14 | \
	      EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
#endif
	sti r0;
	raise 14;		/* Branches to _evt_evt14 */
2:
	jump 2b;		/* Likely paranoid. */
ENDPROC(___ipipe_call_irqtail)

#endif /* CONFIG_IPIPE */