Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle | |
7 | * Copyright (C) 1996 by Paul M. Antoine | |
8 | * Copyright (C) 1999 Silicon Graphics | |
9 | * Copyright (C) 2000 MIPS Technologies, Inc. | |
10 | */ | |
192ef366 RB |
11 | #ifndef _ASM_IRQFLAGS_H |
12 | #define _ASM_IRQFLAGS_H | |
13 | ||
14 | #ifndef __ASSEMBLY__ | |
1da177e4 | 15 | |
8a1e97ee | 16 | #include <linux/compiler.h> |
1da177e4 LT |
17 | #include <asm/hazards.h> |
18 | ||
e97c5b60 | 19 | #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) |
1da177e4 | 20 | |
/*
 * Emit the assembler macro "arch_local_irq_disable" into every
 * translation unit that includes this header.  On MIPS R2 the "di"
 * instruction atomically clears Status.IE; irq_disable_hazard (from
 * <asm/hazards.h>) covers the CP0 write hazard that follows it.
 */
__asm__(
	"	.macro	arch_local_irq_disable\n"
	"	.set	push						\n"
	"	.set	noat						\n"
	"	di							\n"
	"	irq_disable_hazard					\n"
	"	.set	pop						\n"
	"	.endm							\n");
1da177e4 | 29 | |
df9ee292 | 30 | static inline void arch_local_irq_disable(void) |
1da177e4 LT |
31 | { |
32 | __asm__ __volatile__( | |
df9ee292 | 33 | "arch_local_irq_disable" |
1da177e4 LT |
34 | : /* no outputs */ |
35 | : /* no inputs */ | |
36 | : "memory"); | |
37 | } | |
38 | ||
1da177e4 | 39 | |
/*
 * Assembler macro "arch_local_irq_save \result": "di \result" copies
 * the old Status register into \result while clearing Status.IE; the
 * following "andi \result, 1" keeps only the IE bit, so \result ends
 * up holding the pre-disable interrupt-enable state (0 or 1).
 */
__asm__(
	"	.macro	arch_local_irq_save result			\n"
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
	"	di	\\result					\n"
	"	andi	\\result, 1					\n"
	"	irq_disable_hazard					\n"
	"	.set	pop						\n"
	"	.endm							\n");
1da177e4 | 50 | |
/*
 * Disable interrupts and return the previous interrupt-enable state
 * (bit 0 set iff interrupts were enabled), suitable for a later
 * arch_local_irq_restore().
 */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long state;

	__asm__ __volatile__(
		"arch_local_irq_save\t%0"
		: "=r" (state)
		: /* no inputs */
		: "memory");

	return state;
}
1da177e4 | 60 | |
e97c5b60 | 61 | |
/*
 * Assembler macro "arch_local_irq_restore \flags": put Status.IE back
 * to the state captured by arch_local_irq_save (bit 0 of \flags).
 */
__asm__(
	"	.macro	arch_local_irq_restore flags			\n"
	"	.set	push						\n"
	"	.set	noreorder					\n"
	"	.set	noat						\n"
#if defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we're having since days 1.
	 * (If \flags has IE clear, skip; otherwise di/ei re-enables
	 * atomically instead of read-modify-writing Status.)
	 */
	"	beqz	\\flags, 1f					\n"
	"	di							\n"
	"	ei							\n"
	"1:								\n"
#else
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 * (Non-atomic read-modify-write of Status ($12): "ins" splices
	 * bit 0 of \flags into the IE field.)
	 */
	"	mfc0	$1, $12						\n"
	"	ins	$1, \\flags, 0, 1				\n"
	"	mtc0	$1, $12						\n"
#endif
	"	irq_disable_hazard					\n"
	"	.set	pop						\n"
	"	.endm							\n");
1da177e4 | 87 | |
df9ee292 | 88 | static inline void arch_local_irq_restore(unsigned long flags) |
8a1e97ee RB |
89 | { |
90 | unsigned long __tmp1; | |
91 | ||
8a1e97ee | 92 | __asm__ __volatile__( |
df9ee292 | 93 | "arch_local_irq_restore\t%0" |
8a1e97ee RB |
94 | : "=r" (__tmp1) |
95 | : "0" (flags) | |
96 | : "memory"); | |
97 | } | |
1da177e4 | 98 | |
/*
 * __arch_local_irq_restore() was a byte-for-byte duplicate of
 * arch_local_irq_restore() (same asm macro, same constraints); keep it
 * as a distinct entry point for its existing callers but delegate so
 * the inline-asm invocation exists in exactly one place.
 */
static inline void __arch_local_irq_restore(unsigned long flags)
{
	arch_local_irq_restore(flags);
}
e97c5b60 JQ |
109 | #else |
110 | /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */ | |
111 | void arch_local_irq_disable(void); | |
112 | unsigned long arch_local_irq_save(void); | |
113 | void arch_local_irq_restore(unsigned long flags); | |
114 | void __arch_local_irq_restore(unsigned long flags); | |
115 | #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ | |
116 | ||
117 | ||
/*
 * Assembler macro "arch_local_irq_enable", with one variant per
 * configuration:
 *  - SMTC: clear TCStatus.IXMT (bit 0x400) so this thread context
 *    stops masking interrupts (TCStatus is CP0 reg $2 sel 1);
 *  - MIPS R2: single atomic "ei" sets Status.IE;
 *  - legacy: read-modify-write Status ($12); the ori/xori pair sets
 *    IE (bit 0) while clearing EXL/ERL/KSU bits 1..4.
 */
__asm__(
	"	.macro	arch_local_irq_enable				\n"
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
	"	ori	$1, 0x400					\n"
	"	xori	$1, 0x400					\n"
	"	mtc0	$1, $2, 1					\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	ei							\n"
#else
	"	mfc0	$1,$12						\n"
	"	ori	$1,0x1f						\n"
	"	xori	$1,0x1e						\n"
	"	mtc0	$1,$12						\n"
#endif
	"	irq_enable_hazard					\n"
	"	.set	pop						\n"
	"	.endm");
139 | ||
140 | extern void smtc_ipi_replay(void); | |
141 | ||
142 | static inline void arch_local_irq_enable(void) | |
143 | { | |
144 | #ifdef CONFIG_MIPS_MT_SMTC | |
145 | /* | |
146 | * SMTC kernel needs to do a software replay of queued | |
147 | * IPIs, at the cost of call overhead on each local_irq_enable() | |
148 | */ | |
149 | smtc_ipi_replay(); | |
150 | #endif | |
151 | __asm__ __volatile__( | |
152 | "arch_local_irq_enable" | |
153 | : /* no outputs */ | |
154 | : /* no inputs */ | |
155 | : "memory"); | |
156 | } | |
157 | ||
158 | ||
/*
 * Assembler macro "arch_local_save_flags \flags": read the register
 * that carries the interrupt-mask state without modifying it --
 * TCStatus (CP0 $2 sel 1) on SMTC, Status (CP0 $12) otherwise.
 */
__asm__(
	"	.macro	arch_local_save_flags flags			\n"
	"	.set	push						\n"
	"	.set	reorder						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	\\flags, $2, 1					\n"
#else
	"	mfc0	\\flags, $12					\n"
#endif
	"	.set	pop						\n"
	"	.endm							\n");
170 | ||
/*
 * Snapshot the current interrupt-state register (see the
 * arch_local_save_flags asm macro) without changing it.
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__("arch_local_save_flags %0"
			     : "=r" (flags));

	return flags;
}
177 | ||
8531a35e | 178 | |
/*
 * Decide from a saved flags word whether interrupts were disabled.
 * Returns nonzero when disabled, 0 when enabled.
 */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * The SMTC model disables interrupts per thread/CPU via
	 * TCStatus.IXMT (bit 0x400): set means masked.
	 */
	return flags & 0x400;
#else
	/* Status.IE is bit 0: clear means interrupts are disabled. */
	return (flags & 1) == 0;
#endif
}
1da177e4 | 190 | |
e97c5b60 | 191 | #endif /* #ifndef __ASSEMBLY__ */ |
192ef366 RB |
192 | |
193 | /* | |
194 | * Do the CPU's IRQ-state tracing from assembly code. | |
195 | */ | |
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
/*
 * 64-bit: restore the argument registers $4-$11 plus $2 from the
 * saved pt_regs frame after the C call clobbered them.
 */
# define TRACE_IRQS_RELOAD_REGS						\
	LONG_L	$11, PT_R11(sp);					\
	LONG_L	$10, PT_R10(sp);					\
	LONG_L	$9, PT_R9(sp);						\
	LONG_L	$8, PT_R8(sp);						\
	LONG_L	$7, PT_R7(sp);						\
	LONG_L	$6, PT_R6(sp);						\
	LONG_L	$5, PT_R5(sp);						\
	LONG_L	$4, PT_R4(sp);						\
	LONG_L	$2, PT_R2(sp)
#else
/* 32-bit: only $4-$7 and $2 need reloading. */
# define TRACE_IRQS_RELOAD_REGS						\
	LONG_L	$7, PT_R7(sp);						\
	LONG_L	$6, PT_R6(sp);						\
	LONG_L	$5, PT_R5(sp);						\
	LONG_L	$4, PT_R4(sp);						\
	LONG_L	$2, PT_R2(sp)
#endif
# define TRACE_IRQS_ON						\
	CLI;	/* make sure trace_hardirqs_on() is called in kernel level */ \
	jal	trace_hardirqs_on
/* Variant that also reloads the clobbered registers afterwards. */
# define TRACE_IRQS_ON_RELOAD				\
	TRACE_IRQS_ON;					\
	TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF					\
	jal	trace_hardirqs_off
#else
/* Tracing disabled: the markers expand to nothing. */
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif
230 | ||
231 | #endif /* _ASM_IRQFLAGS_H */ |