/*
 *  include/asm-s390/system.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *
 *  Derived from "include/asm-i386/system.h"
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);

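/*
 * Floating point registers 0, 2, 4 and 6 exist on all s390 machines;
 * the remaining twelve registers and the floating point control
 * register are only available with the IEEE floating point facility,
 * hence the MACHINE_HAS_IEEE checks in the two functions below.
 */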
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                " std   0,8(%1)\n"
                " std   2,24(%1)\n"
                " std   4,40(%1)\n"
                " std   6,56(%1)"
                : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                " stfpc 0(%1)\n"
                " std   1,16(%1)\n"
                " std   3,32(%1)\n"
                " std   5,48(%1)\n"
                " std   7,64(%1)\n"
                " std   8,72(%1)\n"
                " std   9,80(%1)\n"
                " std   10,88(%1)\n"
                " std   11,96(%1)\n"
                " std   12,104(%1)\n"
                " std   13,112(%1)\n"
                " std   14,120(%1)\n"
                " std   15,128(%1)\n"
                : "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
        asm volatile(
                " ld    0,8(%0)\n"
                " ld    2,24(%0)\n"
                " ld    4,40(%0)\n"
                " ld    6,56(%0)"
                : : "a" (fpregs), "m" (*fpregs));
        if (!MACHINE_HAS_IEEE)
                return;
        asm volatile(
                " lfpc  0(%0)\n"
                " ld    1,16(%0)\n"
                " ld    3,32(%0)\n"
                " ld    5,48(%0)\n"
                " ld    7,64(%0)\n"
                " ld    8,72(%0)\n"
                " ld    9,80(%0)\n"
                " ld    10,88(%0)\n"
                " ld    11,96(%0)\n"
                " ld    12,104(%0)\n"
                " ld    13,112(%0)\n"
                " ld    14,120(%0)\n"
                " ld    15,128(%0)\n"
                : : "a" (fpregs), "m" (*fpregs));
}

static inline void save_access_regs(unsigned int *acrs)
{
        asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory");
}

static inline void restore_access_regs(unsigned int *acrs)
{
        asm volatile("lam 0,15,0(%0)" : : "a" (acrs));
}

#define switch_to(prev,next,last) do { \
        if (prev == next) \
                break; \
        save_fp_regs(&prev->thread.fp_regs); \
        restore_fp_regs(&next->thread.fp_regs); \
        save_access_regs(&prev->thread.acrs[0]); \
        restore_access_regs(&next->thread.acrs[0]); \
        prev = __switch_to(prev,next); \
} while (0)
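
/*
 * Only the floating point and access register state is switched in C
 * above; the general purpose registers and the kernel stack are
 * switched by the assembler routine __switch_to(), which returns the
 * previous task so that "prev" is still correct after the switch.
 */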

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_vtime(struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
#else
#define account_vtime(x) do { /* empty */ } while (0)
#endif
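
/*
 * The pfault_*() functions below control the z/VM pseudo page fault
 * facility: instead of stopping the virtual CPU while the hypervisor
 * resolves a host page fault, the kernel gets an interrupt and can
 * schedule another task in the meantime.
 */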

#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_irq_init()       do { } while (0)
#define pfault_init()           ({-1;})
#define pfault_fini()           do { } while (0)
#endif /* CONFIG_PFAULT */

#define finish_arch_switch(prev) do { \
        set_fs(current->thread.mm_segment); \
        account_vtime(prev); \
} while (0)

#define nop() asm volatile("nop")

#define xchg(ptr,x) \
({ \
        __typeof__(*(ptr)) __ret; \
        __ret = (__typeof__(*(ptr))) \
                __xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr))); \
        __ret; \
})

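/*
 * Example use of xchg() (an illustrative sketch only; "simple_lock" is
 * a hypothetical variable, not part of this header):
 *
 *      static unsigned int simple_lock;
 *
 *      while (xchg(&simple_lock, 1) != 0)
 *              ;                       // old value 1: lock already held
 *      // old value 0: we now own the lock
 *      xchg(&simple_lock, 0);          // release
 */
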
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
        unsigned long addr, old;
        int shift;

        switch (size) {
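        /*
         * COMPARE AND SWAP only operates on aligned 4-byte words. For
         * the one and two byte cases below, the address is rounded
         * down to a word boundary, "shift" is the bit position of the
         * operand within that (big-endian) word, and the CS loop
         * exchanges just the addressed byte/halfword, retrying if
         * another CPU changed the containing word concurrently.
         */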
        case 1:
                addr = (unsigned long) ptr;
                shift = (3 ^ (addr & 3)) << 3;
                addr ^= addr & 3;
                asm volatile(
                        " l     %0,0(%4)\n"
                        "0: lr  0,%0\n"
                        " nr    0,%3\n"
                        " or    0,%2\n"
                        " cs    %0,0,0(%4)\n"
                        " jl    0b\n"
                        : "=&d" (old), "=m" (*(int *) addr)
                        : "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
                          "m" (*(int *) addr) : "memory", "cc", "0");
                x = old >> shift;
                break;
        case 2:
                addr = (unsigned long) ptr;
                shift = (2 ^ (addr & 2)) << 3;
                addr ^= addr & 2;
                asm volatile(
                        " l     %0,0(%4)\n"
                        "0: lr  0,%0\n"
                        " nr    0,%3\n"
                        " or    0,%2\n"
                        " cs    %0,0,0(%4)\n"
                        " jl    0b\n"
                        : "=&d" (old), "=m" (*(int *) addr)
                        : "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
                          "m" (*(int *) addr) : "memory", "cc", "0");
                x = old >> shift;
                break;
        case 4:
                asm volatile(
                        " l     %0,0(%3)\n"
                        "0: cs  %0,%2,0(%3)\n"
                        " jl    0b\n"
                        : "=&d" (old), "=m" (*(int *) ptr)
                        : "d" (x), "a" (ptr), "m" (*(int *) ptr)
                        : "memory", "cc");
                x = old;
                break;
#ifdef __s390x__
        case 8:
                asm volatile(
                        " lg    %0,0(%3)\n"
                        "0: csg %0,%2,0(%3)\n"
                        " jl    0b\n"
                        : "=&d" (old), "=m" (*(long *) ptr)
                        : "d" (x), "a" (ptr), "m" (*(long *) ptr)
                        : "memory", "cc");
                x = old;
                break;
#endif /* __s390x__ */
        }
        return x;
}

/*
 * Atomic compare and exchange. Compare OLD with MEM, and if they are
 * identical, store NEW in MEM. Return the initial value of MEM.
 * Success is indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr,o,n) \
        ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
                                       (unsigned long)(n), sizeof(*(ptr))))

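/*
 * Typical compare-and-swap retry loop (an illustrative sketch only;
 * "counter" is a hypothetical variable, not part of this header):
 *
 *      unsigned int old, new;
 *
 *      do {
 *              old = counter;
 *              new = old + 1;
 *      } while (cmpxchg(&counter, old, new) != old);
 */
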
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        unsigned long addr, prev, tmp;
        int shift;

        switch (size) {
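        /*
         * As in __xchg(), the one and two byte cases emulate a
         * sub-word compare-and-swap with the word-sized CS
         * instruction. When CS fails, the loop retries only if bits
         * *outside* the operand changed (xr/nr/jnz); a genuine
         * mismatch within the operand falls through to label 1 and
         * the current value is returned, signalling failure to the
         * caller.
         */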
        case 1:
                addr = (unsigned long) ptr;
                shift = (3 ^ (addr & 3)) << 3;
                addr ^= addr & 3;
                asm volatile(
                        " l     %0,0(%4)\n"
                        "0: nr  %0,%5\n"
                        " lr    %1,%0\n"
                        " or    %0,%2\n"
                        " or    %1,%3\n"
                        " cs    %0,%1,0(%4)\n"
                        " jnl   1f\n"
                        " xr    %1,%0\n"
                        " nr    %1,%5\n"
                        " jnz   0b\n"
                        "1:"
                        : "=&d" (prev), "=&d" (tmp)
                        : "d" (old << shift), "d" (new << shift), "a" (ptr),
                          "d" (~(255 << shift))
                        : "memory", "cc");
                return prev >> shift;
        case 2:
                addr = (unsigned long) ptr;
                shift = (2 ^ (addr & 2)) << 3;
                addr ^= addr & 2;
                asm volatile(
                        " l     %0,0(%4)\n"
                        "0: nr  %0,%5\n"
                        " lr    %1,%0\n"
                        " or    %0,%2\n"
                        " or    %1,%3\n"
                        " cs    %0,%1,0(%4)\n"
                        " jnl   1f\n"
                        " xr    %1,%0\n"
                        " nr    %1,%5\n"
                        " jnz   0b\n"
                        "1:"
                        : "=&d" (prev), "=&d" (tmp)
                        : "d" (old << shift), "d" (new << shift), "a" (ptr),
                          "d" (~(65535 << shift))
                        : "memory", "cc");
                return prev >> shift;
        case 4:
                asm volatile(
                        " cs    %0,%2,0(%3)\n"
                        : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
                        : "memory", "cc");
                return prev;
#ifdef __s390x__
        case 8:
                asm volatile(
                        " csg   %0,%2,0(%3)\n"
                        : "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
                        : "memory", "cc");
                return prev;
#endif /* __s390x__ */
        }
        return old;
}

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory operations have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio() asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x) eieio()
#define mb() eieio()
#define rmb() eieio()
#define wmb() eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()


#define set_mb(var, value) do { var = value; mb(); } while (0)
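
/*
 * "bcr 15,0" is a branch-on-condition that never branches (register 0
 * as target means "no branch") but, per the architecture, performs a
 * serialization and checkpoint synchronization: all prior storage
 * accesses complete, as observed by other CPUs and channel programs,
 * before any later access starts. Read, write and full barriers are
 * therefore all the same instruction on s390.
 */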

#ifdef __s390x__

314 | #define __ctl_load(array, low, high) ({ \ |
315 | typedef struct { char _[sizeof(array)]; } addrtype; \ | |
316 | asm volatile( \ | |
317 | " lctlg %1,%2,0(%0)\n" \ | |
318 | : : "a" (&array), "i" (low), "i" (high), \ | |
319 | "m" (*(addrtype *)(array))); \ | |
1da177e4 LT |
320 | }) |
321 | ||
94c12cc7 MS |
322 | #define __ctl_store(array, low, high) ({ \ |
323 | typedef struct { char _[sizeof(array)]; } addrtype; \ | |
324 | asm volatile( \ | |
325 | " stctg %2,%3,0(%1)\n" \ | |
326 | : "=m" (*(addrtype *)(array)) \ | |
327 | : "a" (&array), "i" (low), "i" (high)); \ | |
1da177e4 LT |
328 | }) |
329 | ||
1da177e4 LT |
330 | #else /* __s390x__ */ |
331 | ||
94c12cc7 MS |
332 | #define __ctl_load(array, low, high) ({ \ |
333 | typedef struct { char _[sizeof(array)]; } addrtype; \ | |
334 | asm volatile( \ | |
335 | " lctl %1,%2,0(%0)\n" \ | |
336 | : : "a" (&array), "i" (low), "i" (high), \ | |
337 | "m" (*(addrtype *)(array))); \ | |
338 | }) | |
1da177e4 | 339 | |
94c12cc7 MS |
340 | #define __ctl_store(array, low, high) ({ \ |
341 | typedef struct { char _[sizeof(array)]; } addrtype; \ | |
342 | asm volatile( \ | |
343 | " stctl %2,%3,0(%1)\n" \ | |
344 | : "=m" (*(addrtype *)(array)) \ | |
345 | : "a" (&array), "i" (low), "i" (high)); \ | |
1da177e4 LT |
346 | }) |
347 | ||
1da177e4 LT |
348 | #endif /* __s390x__ */ |
349 | ||
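/*
 * The "addrtype" typedef above is the usual trick to give the compiler
 * a memory operand covering the whole array: the dummy struct has the
 * same size as the array, so "m" (*(addrtype *)(array)) tells gcc that
 * the asm reads or writes all of it, not just the first element.
 */
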
#define __ctl_set_bit(cr, bit) ({ \
        unsigned long __dummy; \
        __ctl_store(__dummy, cr, cr); \
        __dummy |= 1UL << (bit); \
        __ctl_load(__dummy, cr, cr); \
})

#define __ctl_clear_bit(cr, bit) ({ \
        unsigned long __dummy; \
        __ctl_store(__dummy, cr, cr); \
        __dummy &= ~(1UL << (bit)); \
        __ctl_load(__dummy, cr, cr); \
})

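/*
 * __ctl_set_bit()/__ctl_clear_bit() are read-modify-write helpers:
 * they fetch control register "cr" with __ctl_store(), flip the
 * requested bit and write the register back with __ctl_load(). Bit
 * numbers use the 1UL << (bit) convention, i.e. bit 0 is the least
 * significant bit.
 */
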
#include <linux/irqflags.h>

/*
 * Set the PSW mask to the given value, except for the first byte,
 * which is taken over from the current PSW and left unchanged by
 * this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
        __load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
}

#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif