/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);

26static inline void save_fp_regs(s390_fp_regs *fpregs)
27{
94c12cc7 28 asm volatile(
987bcdac
MS
29 " std 0,%O0+8(%R0)\n"
30 " std 2,%O0+24(%R0)\n"
31 " std 4,%O0+40(%R0)\n"
32 " std 6,%O0+56(%R0)"
33 : "=Q" (*fpregs) : "Q" (*fpregs));
1da177e4
LT
34 if (!MACHINE_HAS_IEEE)
35 return;
36 asm volatile(
987bcdac
MS
37 " stfpc %0\n"
38 " std 1,%O0+16(%R0)\n"
39 " std 3,%O0+32(%R0)\n"
40 " std 5,%O0+48(%R0)\n"
41 " std 7,%O0+64(%R0)\n"
42 " std 8,%O0+72(%R0)\n"
43 " std 9,%O0+80(%R0)\n"
44 " std 10,%O0+88(%R0)\n"
45 " std 11,%O0+96(%R0)\n"
46 " std 12,%O0+104(%R0)\n"
47 " std 13,%O0+112(%R0)\n"
48 " std 14,%O0+120(%R0)\n"
49 " std 15,%O0+128(%R0)\n"
50 : "=Q" (*fpregs) : "Q" (*fpregs));
1da177e4
LT
51}
52
53static inline void restore_fp_regs(s390_fp_regs *fpregs)
54{
94c12cc7 55 asm volatile(
987bcdac
MS
56 " ld 0,%O0+8(%R0)\n"
57 " ld 2,%O0+24(%R0)\n"
58 " ld 4,%O0+40(%R0)\n"
59 " ld 6,%O0+56(%R0)"
60 : : "Q" (*fpregs));
1da177e4
LT
61 if (!MACHINE_HAS_IEEE)
62 return;
63 asm volatile(
987bcdac
MS
64 " lfpc %0\n"
65 " ld 1,%O0+16(%R0)\n"
66 " ld 3,%O0+32(%R0)\n"
67 " ld 5,%O0+48(%R0)\n"
68 " ld 7,%O0+64(%R0)\n"
69 " ld 8,%O0+72(%R0)\n"
70 " ld 9,%O0+80(%R0)\n"
71 " ld 10,%O0+88(%R0)\n"
72 " ld 11,%O0+96(%R0)\n"
73 " ld 12,%O0+104(%R0)\n"
74 " ld 13,%O0+112(%R0)\n"
75 " ld 14,%O0+120(%R0)\n"
76 " ld 15,%O0+128(%R0)\n"
77 : : "Q" (*fpregs));
1da177e4
LT
78}
79
static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile("stam	0,15,%0" : "=Q" (*acrs));
}

static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile("lam	0,15,%0" : : "Q" (*acrs));
}

#define switch_to(prev,next,last) do {					\
	if (prev->mm) {							\
		save_fp_regs(&prev->thread.fp_regs);			\
		save_access_regs(&prev->thread.acrs[0]);		\
	}								\
	if (next->mm) {							\
		restore_fp_regs(&next->thread.fp_regs);			\
		restore_access_regs(&next->thread.acrs[0]);		\
		update_per_regs(next);					\
	}								\
	prev = __switch_to(prev,next);					\
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);

#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_init()		({-1;})
#define pfault_fini()		do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);
extern void copy_to_absolute_zero(void *dest, void *src, size_t count);
extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);

#define finish_arch_switch(prev) do {					\
	set_fs(current->thread.mm_segment);				\
	account_vtime(prev, current);					\
} while (0)

#define nop() asm volatile("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio()	asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)	eieio()
#define mb()	eieio()
#define rmb()	eieio()
#define wmb()	eieio()
#define read_barrier_depends()	do { } while (0)
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

#define set_mb(var, value)	do { var = value; mb(); } while (0)

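/*
 * Illustrative sketch, not part of the original header: a typical pairing of
 * the SMP barriers defined above.  A writer publishes data and then sets a
 * flag; the reader checks the flag before touching the data.  The struct and
 * the example_* helper names are hypothetical and only serve to show the
 * smp_wmb()/smp_rmb() ordering contract.
 */
struct example_msg {
	int payload;
	int ready;
};

static inline void example_publish(struct example_msg *m, int val)
{
	m->payload = val;	/* store the data first */
	smp_wmb();		/* order the data store before the flag store */
	m->ready = 1;		/* then publish the flag */
}

static inline int example_consume(struct example_msg *m)
{
	if (!m->ready)		/* flag not set yet: nothing to read */
		return -1;
	smp_rmb();		/* order the flag load before the data load */
	return m->payload;
}
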
#ifdef __s390x__

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctlg	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctg	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
})

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctl	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctl	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
})

#endif /* __s390x__ */

#define __ctl_set_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy |= 1UL << (bit);	\
	__ctl_load(__dummy, cr, cr);	\
})

#define __ctl_clear_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy &= ~(1UL << (bit));	\
	__ctl_load(__dummy, cr, cr);	\
})

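/*
 * Illustrative sketch, not part of the original header: how the control
 * register helpers above are typically combined.  The register number 4 and
 * bit number 2 are placeholders chosen for illustration only; they make no
 * statement about the real control register layout.
 */
static inline unsigned long example_toggle_cr_bit(void)
{
	unsigned long cr;

	__ctl_set_bit(4, 2);	/* OR 1UL << 2 into control register 4 */
	__ctl_store(cr, 4, 4);	/* read back control register 4 */
	__ctl_clear_bit(4, 2);	/* clear the bit again */
	return cr;
}
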
/*
 * Set the psw mask, except for the first byte, which is left
 * unchanged by this function.
 */
static inline void __set_psw_mask(unsigned long mask)
{
	__load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
}

#define local_mcck_enable() \
	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK)
#define local_mcck_disable() \
	__set_psw_mask(psw_kernel_bits | PSW_MASK_DAT)

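/*
 * Illustrative sketch, not part of the original header: local_mcck_disable()
 * and local_mcck_enable() bracket a short section during which machine-check
 * interruptions must not be delivered on this CPU.  example_mcck_critical()
 * is a hypothetical helper name.
 */
static inline void example_mcck_critical(void (*fn)(void))
{
	local_mcck_disable();	/* drop PSW_MASK_MCHECK from the PSW mask */
	fn();			/* run the sensitive work */
	local_mcck_enable();	/* allow machine checks again */
}
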
#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

#define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */

/*
 * The test_facility function uses the bit ordering where the MSB is bit 0.
 * That makes it easier to query facility bits with the bit number as
 * documented in the Principles of Operation.
 */
static inline int test_facility(unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return 0;
	ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
	return (*ptr & (0x80 >> (nr & 7))) != 0;
}

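/*
 * Illustrative sketch, not part of the original header: gating an optional
 * code path on an installed facility.  The facility number 42 is a
 * placeholder picked for illustration, not a reference to a real facility
 * bit from the Principles of Operation.
 */
static inline int example_have_fancy_facility(void)
{
	/* Bit numbers use the MSB-0 convention described above. */
	return test_facility(42);
}
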
static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

extern unsigned long arch_align_stack(unsigned long sp);

static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}

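/*
 * Illustrative sketch, not part of the original header: tprot() returns the
 * TPROT condition code (0..3) or -EFAULT if the access raised an exception.
 * Treating only condition code 0 as "fetching and storing permitted" is an
 * assumption about the caller's needs; example_addr_writable() is a
 * hypothetical helper name.
 */
static inline int example_addr_writable(unsigned long addr)
{
	return tprot(addr) == 0;
}
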
#endif /* __KERNEL__ */

#endif /* __ASM_SYSTEM_H */