/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#include <asm/irqflags.h>
#include <asm/hazards.h>
#include <linux/compiler.h>
#include <linux/preempt.h>
#include <linux/export.h>

#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
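
/*
 * These out-of-line implementations are built only when the inline
 * versions in <asm/irqflags.h> cannot be used: on CPUs without the MIPS
 * R2 di/ei instructions, or on SMTC kernels, where interrupt masking is
 * done per TC through the CP0 TCStatus register.
 */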

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * The R4000/R4400 need three nops, the R4600 two, and the R10000 needs
 * none at all.
 */
/*
 * For the TX49, clearing only the IE bit is not enough.
 *
 * If the mfc0 from $12 follows a store, and the mfc0 is the last
 * instruction of a page, and fetching the next instruction causes a TLB
 * miss, the result of the mfc0 may wrongly have the EXL bit set.
 *
 * ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
 *
 * Workaround: mask the EXL bit of the result, or place a nop before the
 * mfc0.
 */
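/*
 * arch_local_irq_disable: on SMTC kernels this sets the IXMT
 * (interrupt-exempt) bit in CP0 TCStatus ($2, select 1) for the current
 * TC; on other pre-R2 configurations it clears the low bits of CP0
 * Status ($12), including IE, with the ori/xori pair, which also serves
 * as the EXL-masking workaround described above.  The trailing
 * irq_disable_hazard (from <asm/hazards.h>) inserts whatever nops or ehb
 * the CPU needs after the mtc0.
 */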
__asm__(
	"	.macro	arch_local_irq_disable		\n"
	"	.set	push				\n"
	"	.set	noat				\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1			\n"
	"	ori	$1, 0x400			\n"
	"	.set	noreorder			\n"
	"	mtc0	$1, $2, 1			\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	$1, $12				\n"
	"	ori	$1, 0x1f			\n"
	"	xori	$1, 0x1f			\n"
	"	.set	noreorder			\n"
	"	mtc0	$1, $12				\n"
#endif
	"	irq_disable_hazard			\n"
	"	.set	pop				\n"
	"	.endm					\n");

notrace void arch_local_irq_disable(void)
{
	preempt_disable();
	__asm__ __volatile__(
		"arch_local_irq_disable"
		: /* no outputs */
		: /* no inputs */
		: "memory");
	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_disable);
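
/*
 * The C wrappers in this file are what generic code reaches on these
 * configurations, via <linux/irqflags.h>.  Typical use (sketch):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		ends up in arch_local_irq_save()
 *	... critical section ...
 *	local_irq_restore(flags);	ends up in arch_local_irq_restore()
 *
 * The preempt_disable()/preempt_enable() pair keeps the CP0
 * read-modify-write sequence from being preempted part-way through, and
 * notrace keeps these low-level helpers out of the function tracer.
 */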

__asm__(
	"	.macro	arch_local_irq_save result	\n"
	"	.set	push				\n"
	"	.set	reorder				\n"
	"	.set	noat				\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	\\result, $2, 1			\n"
	"	ori	$1, \\result, 0x400		\n"
	"	.set	noreorder			\n"
	"	mtc0	$1, $2, 1			\n"
	"	andi	\\result, \\result, 0x400	\n"
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	\\result, $12			\n"
	"	ori	$1, \\result, 0x1f		\n"
	"	xori	$1, 0x1f			\n"
	"	.set	noreorder			\n"
	"	mtc0	$1, $12				\n"
#endif
	"	irq_disable_hazard			\n"
	"	.set	pop				\n"
	"	.endm					\n");
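
/*
 * arch_local_irq_save leaves the caller's "flags" value in \result: on
 * SMTC it is the previous IXMT bit of TCStatus (masked to 0x400), on
 * other pre-R2 CPUs it is the complete previous Status word.  In both
 * cases interrupts are disabled by the time the macro ends.
 */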
notrace unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	preempt_disable();
	asm volatile("arch_local_irq_save\t%0"
		     : "=r" (flags)
		     : /* no inputs */
		     : "memory");
	preempt_enable();
	return flags;
}
EXPORT_SYMBOL(arch_local_irq_save);

__asm__(
	"	.macro	arch_local_irq_restore flags	\n"
	"	.set	push				\n"
	"	.set	noreorder			\n"
	"	.set	noat				\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1			\n"
	"	andi	\\flags, 0x400			\n"
	"	ori	$1, 0x400			\n"
	"	xori	$1, 0x400			\n"
	"	or	\\flags, $1			\n"
	"	mtc0	\\flags, $2, 1			\n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
	/* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2)
	/* see irqflags.h for inline function */
#else
	"	mfc0	$1, $12				\n"
	"	andi	\\flags, 1			\n"
	"	ori	$1, 0x1f			\n"
	"	xori	$1, 0x1f			\n"
	"	or	\\flags, $1			\n"
	"	mtc0	\\flags, $12			\n"
#endif
	"	irq_disable_hazard			\n"
	"	.set	pop				\n"
	"	.endm					\n");
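
/*
 * arch_local_irq_restore only restores the interrupt-enable state from
 * "flags": the SMTC variant rewrites just the IXMT bit of TCStatus,
 * while the pre-R2 variant reloads Status with its low bits cleared and
 * the saved IE bit (bit 0 of "flags") ORed back in.
 */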
notrace void arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * The SMTC kernel needs to do a software replay of queued IPIs,
	 * at the cost of branch and call overhead on each
	 * local_irq_restore().
	 */
	if (unlikely(!(flags & 0x0400)))
		smtc_ipi_replay();
#endif
	preempt_disable();
	__asm__ __volatile__(
		"arch_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
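
/*
 * Identical to arch_local_irq_restore() above, except that it never
 * replays queued SMTC IPIs.
 */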
notrace void __arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	preempt_disable();
	__asm__ __volatile__(
		"arch_local_irq_restore\t%0"
		: "=r" (__tmp1)
		: "0" (flags)
		: "memory");
	preempt_enable();
}
EXPORT_SYMBOL(__arch_local_irq_restore);

#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */