/* $Id: system.h,v 1.69 2002/02/09 19:49:31 davem Exp $ */
#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <linux/config.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__
/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4        = 0x00,
	sun4c       = 0x01,
	sun4m       = 0x02,
	sun4d       = 0x03,
	sun4e       = 0x04,
	sun4u       = 0x05, /* V8 ploos ploos */
	sun_unknown = 0x06,
	ap1000      = 0x07, /* almost a sun4m */
};

#define sparc_cpu_model sun4u

/* This cannot ever be a sun4c nor sun4 :) That's just history. */
#define ARCH_SUN4C_SUN4 0
#define ARCH_SUN4 0

#endif

#define setipl(__new_ipl) \
	__asm__ __volatile__("wrpr %0, %%pil" : : "r" (__new_ipl) : "memory")

#define local_irq_disable() \
	__asm__ __volatile__("wrpr 15, %%pil" : : : "memory")

#define local_irq_enable() \
	__asm__ __volatile__("wrpr 0, %%pil" : : : "memory")

#define getipl() \
({ unsigned long retval; __asm__ __volatile__("rdpr %%pil, %0" : "=r" (retval)); retval; })

#define swap_pil(__new_pil) \
({	unsigned long retval; \
	__asm__ __volatile__("rdpr	%%pil, %0\n\t" \
			     "wrpr	%1, %%pil" \
			     : "=&r" (retval) \
			     : "r" (__new_pil) \
			     : "memory"); \
	retval; \
})

#define read_pil_and_cli() \
({	unsigned long retval; \
	__asm__ __volatile__("rdpr	%%pil, %0\n\t" \
			     "wrpr	15, %%pil" \
			     : "=r" (retval) \
			     : : "memory"); \
	retval; \
})

#define local_save_flags(flags)		((flags) = getipl())
#define local_irq_save(flags)		((flags) = read_pil_and_cli())
#define local_irq_restore(flags)	setipl((flags))

/* On sparc64 IRQ flags are the PIL register.  A value of zero
 * means all interrupt levels are enabled, any other value means
 * only IRQ levels greater than that value will be received.
 * Consequently this means that the lowest IRQ level is one.
 */
#define irqs_disabled()		\
({	unsigned long flags;	\
	local_save_flags(flags);\
	(flags > 0);		\
})

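/* Example (illustrative only, not part of the interface above): the
 * canonical save/disable/restore pattern built from these macros.
 * The critical section shown is hypothetical.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		saves %pil, then writes 15 to it
 *	... code that must not race with local interrupt handlers ...
 *	local_irq_restore(flags);	writes the saved %pil back
 */
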
#define nop()		__asm__ __volatile__ ("nop")

#define membar(type)	__asm__ __volatile__ ("membar " type : : : "memory")
#define mb()		\
	membar("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
#define rmb()		membar("#LoadLoad")
#define wmb()		membar("#StoreStore")
#define read_barrier_depends()		do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar("#StoreLoad | #StoreStore"); } while(0)
#define set_wmb(__var, __value) \
	do { __var = __value; membar("#StoreStore"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#define smp_read_barrier_depends()	do { } while(0)
#endif
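
/* Example (illustrative only): the classic producer/consumer pairing
 * of wmb()/rmb() on two hypothetical variables 'data' and 'ready'.
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	data = 42;			while (!ready)
 *	wmb();					;
 *	ready = 1;			rmb();
 *					BUG_ON(data != 42);
 *
 * wmb() keeps the store to 'data' ordered before the store to
 * 'ready'; rmb() keeps the load of 'ready' ordered before the load
 * of 'data'.  Use the smp_*() forms when only SMP ordering matters.
 */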

#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")

/* Performance counter register access. */
#define read_pcr(__p)  __asm__ __volatile__("rd	%%pcr, %0" : "=r" (__p))
#define write_pcr(__p) __asm__ __volatile__("wr	%0, 0x0, %%pcr" : : "r" (__p))
#define read_pic(__p)  __asm__ __volatile__("rd	%%pic, %0" : "=r" (__p))

/* Blackbird errata workaround.  See commentary in
 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
 * for more information.
 */
#define reset_pic()						\
	__asm__ __volatile__("ba,pt	%xcc, 99f\n\t"		\
			     ".align	64\n"			\
			  "99:wr	%g0, 0x0, %pic\n\t"	\
			     "rd	%pic, %g0")

#ifndef __ASSEMBLY__

extern void sun_do_break(void);
extern int serial_console;
extern int stop_a_enabled;

static __inline__ int con_is_present(void)
{
	return serial_console ? 0 : 1;
}

extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

/* Don't hold the runqueue lock over context switch */
#define __ARCH_WANT_UNLOCKED_CTXSW
#define prepare_arch_switch(next)	\
do {					\
	flushw_all();			\
} while (0)

/* See what happens when you design the chip correctly?
 *
 * We tell gcc we clobber all non-fixed-usage registers except
 * for l0/l1.  It will use one for 'next' and the other to hold
 * the output value of 'last'.  'next' is not referenced again
 * past the invocation of switch_to in the scheduler, so we need
 * not preserve its value.  Hairy, but it lets us remove 2 loads
 * and 2 stores in this critical code path.  -DaveM
 */
#if __GNUC__ >= 3
#define EXTRA_CLOBBER ,"%l1"
#else
#define EXTRA_CLOBBER
#endif
#define switch_to(prev, next, last)					\
do {	if (test_thread_flag(TIF_PERFCTR)) {				\
		unsigned long __tmp;					\
		read_pcr(__tmp);					\
		current_thread_info()->pcr_reg = __tmp;			\
		read_pic(__tmp);					\
		current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp);\
		current_thread_info()->kernel_cntd1 += ((__tmp) >> 32);	\
	}								\
	flush_tlb_pending();						\
	save_and_clear_fpu();						\
	/* If you are tempted to conditionalize the following */	\
	/* so that ASI is only written if it changes, think again. */	\
	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
	: : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
	__asm__ __volatile__(						\
	"mov	%%g4, %%g7\n\t"						\
	"wrpr	%%g0, 0x95, %%pstate\n\t"				\
	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t"				\
	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t"				\
	"rdpr	%%wstate, %%o5\n\t"					\
	"stx	%%o6, [%%g6 + %3]\n\t"					\
	"stb	%%o5, [%%g6 + %2]\n\t"					\
	"rdpr	%%cwp, %%o5\n\t"					\
	"stb	%%o5, [%%g6 + %5]\n\t"					\
	"mov	%1, %%g6\n\t"						\
	"ldub	[%1 + %5], %%g1\n\t"					\
	"wrpr	%%g1, %%cwp\n\t"					\
	"ldx	[%%g6 + %3], %%o6\n\t"					\
	"ldub	[%%g6 + %2], %%o5\n\t"					\
	"ldub	[%%g6 + %4], %%o7\n\t"					\
	"mov	%%g6, %%l2\n\t"						\
	"wrpr	%%o5, 0x0, %%wstate\n\t"				\
	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t"				\
	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t"				\
	"wrpr	%%g0, 0x94, %%pstate\n\t"				\
	"mov	%%l2, %%g6\n\t"						\
	"ldx	[%%g6 + %6], %%g4\n\t"					\
	"wrpr	%%g0, 0x96, %%pstate\n\t"				\
	"brz,pt %%o7, 1f\n\t"						\
	" mov	%%g7, %0\n\t"						\
	"b,a ret_from_syscall\n\t"					\
	"1:\n\t"							\
	: "=&r" (last)							\
	: "0" (next->thread_info),					\
	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),		\
	  "i" (TI_CWP), "i" (TI_TASK)					\
	: "cc",								\
	  "g1", "g2", "g3", "g7",					\
	  "l2", "l3", "l4", "l5", "l6", "l7",				\
	  "i0", "i1", "i2", "i3", "i4", "i5",				\
	  "o0", "o1", "o2", "o3", "o4", "o5", "o7" EXTRA_CLOBBER);	\
	/* If you fuck with this, update ret_from_syscall code too. */	\
	if (test_thread_flag(TIF_PERFCTR)) {				\
		write_pcr(current_thread_info()->pcr_reg);		\
		reset_pic();						\
	}								\
} while(0)
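
/* Illustrative call site (a sketch; the scheduler of this era invokes
 * the macro from kernel/sched.c:context_switch() roughly as):
 *
 *	switch_to(prev, next, prev);
 *
 * after which execution continues on the new task's kernel stack and
 * the third argument ('last') names the task we switched away from.
 */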

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
"	membar		#StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
"	membar		#StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch (size) {
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
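
/* Example (illustrative only): a minimal test-and-set spin loop built
 * on tas()/xchg() above.  'lock' is a hypothetical variable; real
 * kernel code would use the spinlock API instead.
 *
 *	static volatile unsigned int lock;
 *
 *	while (tas(&lock))		xchg(&lock, 1): nonzero means busy
 *		while (lock)		spin on a plain load meanwhile
 *			;
 *	... critical section ...
 *	mb();				order the section before the release
 *	lock = 0;
 */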

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static __inline__ unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "cas [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

static __inline__ unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "casx [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})
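
/* Example (illustrative only): the standard compare-and-swap retry
 * loop, bumping a hypothetical 'counter'.  Success is detected exactly
 * as the comment above describes: the returned value equals the 'old'
 * that was passed in.
 *
 *	unsigned int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */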

#endif /* !(__ASSEMBLY__) */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC64_SYSTEM_H) */