Merge remote-tracking branch 'battery/for-next'
[deliverable/linux.git] / arch / s390 / include / asm / uaccess.h
CommitLineData
/*
 * S390 version
 * Copyright IBM Corp. 1999, 2000
 * Author(s): Hartmut Penner (hp@de.ibm.com),
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/uaccess.h"
 */
9#ifndef __S390_UACCESS_H
10#define __S390_UACCESS_H
11
12/*
13 * User space memory access functions
14 */
15#include <linux/sched.h>
16#include <linux/errno.h>
a0616cde 17#include <asm/ctl_reg.h>
1da177e4
LT
18
19#define VERIFY_READ 0
20#define VERIFY_WRITE 1
21
/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

/* Build an mm_segment_t from a plain flag value (lands in .ar4, see set_fs()). */
#define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })

/* 0 selects the kernel address space, 1 the user address space. */
#define KERNEL_DS MAKE_MM_SEG(0)
#define USER_DS MAKE_MM_SEG(1)

#define get_ds() (KERNEL_DS)
/* Address space limit of the current task. */
#define get_fs() (current->thread.mm_segment)
/*
 * set_fs() stores the new segment in the thread struct and then loads
 * the matching address space control element (user or kernel ASCE,
 * chosen by the ar4 flag) into control register 7.
 */
#define set_fs(x) \
({									\
	unsigned long __pto;						\
	current->thread.mm_segment = (x);				\
	__pto = current->thread.mm_segment.ar4 ?			\
		S390_lowcore.user_asce : S390_lowcore.kernel_asce;	\
	__ctl_load(__pto, 7, 7);					\
})

/* Two segments are equal iff their ar4 flags match. */
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
/*
 * Address range checking is a no-op on s390: every address is accepted
 * here and faulting accesses are handled through the exception table
 * fixups instead.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(type, addr, size) __access_ok(addr, size)
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

/* Both fields are 32-bit offsets relative to the entry itself. */
struct exception_table_entry
{
	int insn, fixup;
};

/* Resolve the relative fixup offset into an absolute address. */
static inline unsigned long extable_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

/* Tell generic code the extable stores relative, not absolute, addresses. */
#define ARCH_HAS_RELATIVE_EXTABLE
/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long __must_check __copy_from_user(void *to, const void __user *from,
					    unsigned long n);

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long __must_check __copy_to_user(void __user *to, const void *from,
					  unsigned long n);

/* The inatomic variants simply map to the plain copy routines on s390. */
#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

/*
 * Perform a single small (1/2/4/8 byte) copy between kernel and user
 * space with one mvcos instruction.  @spec is an operand-access
 * specification loaded into register 0 for mvcos; the two callers below
 * pass different values for the put and get directions.  Evaluates to 0
 * on success or -EFAULT if the access faulted: both the mvcos and the
 * following xr are covered by exception table entries that branch to
 * the out-of-line fixup at label 3, which loads -EFAULT into the result.
 */
#define __put_get_user_asm(to, from, size, spec)		\
({								\
	register unsigned long __reg0 asm("0") = spec;		\
	int __rc;						\
								\
	asm volatile(						\
		"0: mvcos %1,%3,%2\n"				\
		"1: xr %0,%0\n"					\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3: lhi %0,%5\n"				\
		" jg 2b\n"					\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "=Q" (*(to))			\
		: "d" (size), "Q" (*(from)),			\
		  "d" (__reg0), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})
dc4aace1
HC
154static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
155{
156 unsigned long spec = 0x810000UL;
157 int rc;
158
159 switch (size) {
160 case 1:
161 rc = __put_get_user_asm((unsigned char __user *)ptr,
162 (unsigned char *)x,
163 size, spec);
164 break;
165 case 2:
166 rc = __put_get_user_asm((unsigned short __user *)ptr,
167 (unsigned short *)x,
168 size, spec);
169 break;
170 case 4:
171 rc = __put_get_user_asm((unsigned int __user *)ptr,
172 (unsigned int *)x,
173 size, spec);
174 break;
175 case 8:
176 rc = __put_get_user_asm((unsigned long __user *)ptr,
177 (unsigned long *)x,
178 size, spec);
179 break;
180 };
181 return rc;
182}
183
184static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
185{
186 unsigned long spec = 0x81UL;
187 int rc;
188
189 switch (size) {
190 case 1:
191 rc = __put_get_user_asm((unsigned char *)x,
192 (unsigned char __user *)ptr,
193 size, spec);
194 break;
195 case 2:
196 rc = __put_get_user_asm((unsigned short *)x,
197 (unsigned short __user *)ptr,
198 size, spec);
199 break;
200 case 4:
201 rc = __put_get_user_asm((unsigned int *)x,
202 (unsigned int __user *)ptr,
203 size, spec);
204 break;
205 case 8:
206 rc = __put_get_user_asm((unsigned long *)x,
207 (unsigned long __user *)ptr,
208 size, spec);
209 break;
210 };
211 return rc;
212}
c9ca7841
HC
213
214#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
215
211deca6 216static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
d02765d1 217{
4f41c2b4
HC
218 size = __copy_to_user(ptr, x, size);
219 return size ? -EFAULT : 0;
d02765d1
GS
220}
221
211deca6 222static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
d02765d1 223{
4f41c2b4
HC
224 size = __copy_from_user(x, ptr, size);
225 return size ? -EFAULT : 0;
d02765d1 226}
1da177e4 227
c9ca7841
HC
228#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
229
/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */

/*
 * __put_user - store a simple value to user space without access_ok()
 * checking.  Evaluates to 0 on success or -EFAULT on fault; sizes other
 * than 1/2/4/8 fall through to __put_user_bad().  The value is copied
 * into a local first so @x is evaluated exactly once.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__pu_err, 0);				\
})

/* put_user - like __put_user(), but may sleep on a page fault. */
#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})


int __put_user_bad(void) __attribute__((noreturn));
/*
 * __get_user - fetch a simple value from user space without access_ok()
 * checking.  Evaluates to 0 on success or -EFAULT on fault.  The value
 * is read into a correctly sized unsigned local first and then
 * reinterpreted as the pointee type of @ptr; sizes other than 1/2/4/8
 * fall through to __get_user_bad().
 */
#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})

/* get_user - like __get_user(), but may sleep on a page fault. */
#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})

int __get_user_bad(void) __attribute__((noreturn));
/* The unaligned variants map to the regular routines on s390. */
#define __put_user_unaligned __put_user
#define __get_user_unaligned __get_user

/* Build-time failure for a provably-too-small usercopy with constant size. */
extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

/* Runtime warning when a usercopy length exceeds the known object size. */
static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}
1da177e4
LT
322/**
323 * copy_to_user: - Copy a block of data into user space.
324 * @to: Destination address, in user space.
325 * @from: Source address, in kernel space.
326 * @n: Number of bytes to copy.
327 *
b3c395ef
DH
328 * Context: User context only. This function may sleep if pagefaults are
329 * enabled.
1da177e4
LT
330 *
331 * Copy data from kernel space to user space.
332 *
333 * Returns number of bytes that could not be copied.
334 * On success, this will be zero.
335 */
f7675ad7 336static inline unsigned long __must_check
1da177e4
LT
337copy_to_user(void __user *to, const void *from, unsigned long n)
338{
dab4079d 339 might_fault();
d12a2970 340 return __copy_to_user(to, from, n);
1da177e4
LT
341}
342
/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	/* -1 means the destination object size is unknown to the compiler. */
	unsigned int sz = __compiletime_object_size(to);

	might_fault();
	if (unlikely(sz != -1 && sz < n)) {
		/* Constant oversized n fails the build; otherwise warn at runtime. */
		if (!__builtin_constant_p(n))
			copy_user_overflow(sz, n);
		else
			__bad_copy_user();
		return n;
	}
	return __copy_from_user(to, from, n);
}
unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n);

/*
 * copy_in_user - copy a block from one user space address to another.
 * May sleep on a page fault.  Returns the number of bytes not copied.
 */
static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user(to, from, n);
}
/*
 * Copy a null terminated string from userspace.
 */

long __strncpy_from_user(char *dst, const char __user *src, long count);

/* May sleep on a page fault; thin wrapper around __strncpy_from_user(). */
static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return __strncpy_from_user(dst, src, count);
}

unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

/* Length of a user space string, bounded by @n; may sleep on a page fault. */
static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return __strnlen_user(src, n);
}
/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL)
/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

/* Zero @n bytes of user memory; may sleep.  Returns bytes NOT cleared. */
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}

/* Copy to a "real" destination — presumably real storage; see the definition. */
int copy_to_user_real(void __user *dest, void *src, unsigned long count);
/* Write into write-protected kernel text/rodata; implemented in arch code. */
void s390_kernel_write(void *dst, const void *src, size_t size);
1da177e4 438#endif /* __S390_UACCESS_H */
This page took 0.90306 seconds and 5 git commands to generate.