Commit | Line | Data |
---|---|---|
eed417dd AB |
1 | #ifndef __ASM_GENERIC_UACCESS_H |
2 | #define __ASM_GENERIC_UACCESS_H | |
3 | ||
4 | /* | |
5 | * User space memory access functions, these should work | |
0a4a6647 | 6 | * on any machine that has kernel and user data in the same |
eed417dd AB |
7 | * address space, e.g. all NOMMU machines. |
8 | */ | |
9 | #include <linux/sched.h> | |
eed417dd AB |
10 | #include <linux/string.h> |
11 | ||
12 | #include <asm/segment.h> | |
13 | ||
/* Construct an mm_segment_t from a raw address-limit value. */
#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })

#ifndef KERNEL_DS
/* Kernel segment: no limit, the whole address space is accessible. */
#define KERNEL_DS MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
/* User segment: everything strictly below TASK_SIZE. */
#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)

/*
 * Set the current task's address-limit segment (typically KERNEL_DS or
 * USER_DS).  Architectures may override by defining get_fs themselves.
 */
static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif

#ifndef segment_eq
/* Compare two segments by their raw .seg member. */
#define segment_eq(a, b) ((a).seg == (b).seg)
#endif
eed417dd AB |
37 | |
#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * NOTE: this generic access_ok() ignores the 'type' argument
 * (VERIFY_READ/VERIFY_WRITE); only the address and size are forwarded
 * to __access_ok().
 */
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
43 | /* | |
44 | * The architecture should really override this if possible, at least | |
45 | * doing a check on the get_fs() | |
46 | */ | |
#ifndef __access_ok
/*
 * Default range check: accept everything.  Suitable only for machines
 * where kernel and user share one address space; architectures should
 * override this, at the very least consulting get_fs().
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	(void)addr;	/* unused in the permissive default */
	(void)size;
	return 1;
}
#endif
53 | ||
54 | /* | |
55 | * The exception table consists of pairs of addresses: the first is the | |
56 | * address of an instruction that is allowed to fault, and the second is | |
57 | * the address at which the program should continue. No registers are | |
58 | * modified, so it is entirely up to the continuation code to figure out | |
59 | * what to do. | |
60 | * | |
61 | * All the routines below use bits of fixup code that are out of line | |
62 | * with the main instruction path. This means when everything is well, | |
63 | * we don't even have to jump over them. Further, they do not intrude | |
64 | * on our cache or tlb entries. | |
65 | */ | |
66 | ||
struct exception_table_entry
{
	/* insn: address of the instruction allowed to fault;
	 * fixup: address at which execution resumes after the fault. */
	unsigned long insn, fixup;
};
71 | ||
72 | /* Returns 0 if exception not found and fixup otherwise. */ | |
73 | extern unsigned long search_exception_table(unsigned long); | |
74 | ||
75 | /* | |
76 | * architectures with an MMU should override these two | |
77 | */ | |
#ifndef __copy_from_user
/*
 * Copy @n bytes from user address @from into kernel buffer @to.
 *
 * Generic no-MMU version: user pointers are directly dereferenceable,
 * so no faulting or access checking happens here -- callers are
 * expected to have passed access_ok() first (see copy_from_user()).
 *
 * Returns 0 (this implementation cannot partially fail).
 */
static inline __must_check long __copy_from_user(void *to,
		const void __user * from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		/* Compile-time-constant small sizes collapse to one load/store. */
		switch(n) {
		case 1:
			/* __force strips the __user annotation for sparse */
			*(u8 *)to = *(u8 __force *)from;
			return 0;
		case 2:
			*(u16 *)to = *(u16 __force *)from;
			return 0;
		case 4:
			*(u32 *)to = *(u32 __force *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 *)to = *(u64 __force *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	/* Variable or large sizes: plain memcpy. */
	memcpy(to, (const void __force *)from, n);
	return 0;
}
#endif
107 | ||
#ifndef __copy_to_user
/*
 * Copy @n bytes from kernel buffer @from to user address @to.
 *
 * Mirror image of __copy_from_user() above: no-MMU, no access checks,
 * always returns 0.  Callers perform access_ok() (see copy_to_user()).
 */
static inline __must_check long __copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		/* Constant small sizes become a single store. */
		switch(n) {
		case 1:
			/* __force strips the __user annotation for sparse */
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 2:
			*(u16 __force *)to = *(u16 *)from;
			return 0;
		case 4:
			*(u32 __force *)to = *(u32 *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 __force *)to = *(u64 *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy((void __force *)to, from, n);
	return 0;
}
#endif
137 | ||
138 | /* | |
139 | * These are the main single-value transfer routines. They automatically | |
140 | * use the right size if we just have the right pointer type. | |
141 | * This version just falls back to copy_{from,to}_user, which should | |
142 | * provide a fast-path for small values. | |
143 | */ | |
/*
 * __put_user() - store @x at user address @ptr, no access check.
 *
 * @x is evaluated exactly once (into __x); @ptr is evaluated more than
 * once, so it must be side-effect free.  Only object sizes 1, 2, 4 and
 * 8 are handled; anything else calls __put_user_bad(), which is
 * declared but deliberately never defined here.  Evaluates to 0 on
 * success or -EFAULT on failure from __put_user_fn().
 */
#define __put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __x = (x); \
	int __pu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof (*(ptr))) { \
	case 1: \
	case 2: \
	case 4: \
	case 8: \
		__pu_err = __put_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		break; \
	default: \
		__put_user_bad(); \
		break; \
	} \
	__pu_err; \
})
163 | ||
/*
 * put_user() - checked store of @x to user address @ptr.
 *
 * Performs might_fault() and an access_ok() range check before
 * delegating to __put_user().  Evaluates to 0 on success, or -EFAULT
 * when the destination fails access_ok().
 *
 * Fix: parenthesize the macro argument.  The previous expansion used
 * sizeof(*ptr) and a bare ptr, which miscomputes the size (or fails to
 * compile) when 'ptr' is an expression such as 'p + 1'.
 */
#define put_user(x, ptr)					\
({								\
	might_fault();						\
	access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?	\
		__put_user(x, ptr) :				\
		-EFAULT;					\
})
171 | ||
05d88a49 VG |
172 | #ifndef __put_user_fn |
173 | ||
eed417dd AB |
174 | static inline int __put_user_fn(size_t size, void __user *ptr, void *x) |
175 | { | |
176 | size = __copy_to_user(ptr, x, size); | |
177 | return size ? -EFAULT : size; | |
178 | } | |
179 | ||
05d88a49 VG |
180 | #define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k) |
181 | ||
182 | #endif | |
183 | ||
eed417dd AB |
184 | extern int __put_user_bad(void) __attribute__((noreturn)); |
185 | ||
/*
 * __get_user() - load a value from user address @ptr into @x, no
 * access check.
 *
 * The value is fetched into a correctly-sized unsigned temporary and
 * then reinterpreted as the pointee type of @ptr.  Note: @x is written
 * even when __get_user_fn() reports failure (the temporary is then
 * indeterminate).  @ptr is evaluated more than once.  Sizes other than
 * 1/2/4/8 call __get_user_bad(), declared but never defined here.
 * Evaluates to 0 on success or -EFAULT.
 */
#define __get_user(x, ptr) \
({ \
	int __gu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: { \
		unsigned char __x; \
		__gu_err = __get_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 2: { \
		unsigned short __x; \
		__gu_err = __get_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 4: { \
		unsigned int __x; \
		__gu_err = __get_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	case 8: { \
		unsigned long long __x; \
		__gu_err = __get_user_fn(sizeof (*(ptr)), \
					 ptr, &__x); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	}; \
	default: \
		__get_user_bad(); \
		break; \
	} \
	__gu_err; \
})
225 | ||
/*
 * get_user() - checked load from user address @ptr into @x.
 *
 * Performs might_fault() and an access_ok() range check before
 * delegating to __get_user().  Evaluates to 0 on success, or -EFAULT
 * when the source fails access_ok().
 *
 * Fix: parenthesize the macro argument.  The previous expansion used
 * sizeof(*ptr) and a bare ptr, which miscomputes the size (or fails to
 * compile) when 'ptr' is an expression such as 'p + 1'.
 */
#define get_user(x, ptr)					\
({								\
	might_fault();						\
	access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ?		\
		__get_user(x, ptr) :				\
		-EFAULT;					\
})
233 | ||
05d88a49 | 234 | #ifndef __get_user_fn |
eed417dd AB |
235 | static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) |
236 | { | |
237 | size = __copy_from_user(x, ptr, size); | |
238 | return size ? -EFAULT : size; | |
239 | } | |
240 | ||
05d88a49 VG |
241 | #define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k) |
242 | ||
243 | #endif | |
244 | ||
eed417dd AB |
245 | extern int __get_user_bad(void) __attribute__((noreturn)); |
246 | ||
/*
 * In-atomic variants.  The generic copy routines above contain no
 * fault handling to begin with, so the plain versions are reused
 * unless the architecture provides its own.
 */
#ifndef __copy_from_user_inatomic
#define __copy_from_user_inatomic __copy_from_user
#endif

#ifndef __copy_to_user_inatomic
#define __copy_to_user_inatomic __copy_to_user
#endif
254 | ||
255 | static inline long copy_from_user(void *to, | |
256 | const void __user * from, unsigned long n) | |
257 | { | |
e0acd0bd | 258 | might_fault(); |
a9ede5b3 | 259 | if (access_ok(VERIFY_READ, from, n)) |
eed417dd AB |
260 | return __copy_from_user(to, from, n); |
261 | else | |
262 | return n; | |
263 | } | |
264 | ||
265 | static inline long copy_to_user(void __user *to, | |
266 | const void *from, unsigned long n) | |
267 | { | |
e0acd0bd | 268 | might_fault(); |
a9ede5b3 | 269 | if (access_ok(VERIFY_WRITE, to, n)) |
eed417dd AB |
270 | return __copy_to_user(to, from, n); |
271 | else | |
272 | return n; | |
273 | } | |
274 | ||
275 | /* | |
276 | * Copy a null terminated string from userspace. | |
277 | */ | |
278 | #ifndef __strncpy_from_user | |
279 | static inline long | |
280 | __strncpy_from_user(char *dst, const char __user *src, long count) | |
281 | { | |
282 | char *tmp; | |
283 | strncpy(dst, (const char __force *)src, count); | |
284 | for (tmp = dst; *tmp && count > 0; tmp++, count--) | |
285 | ; | |
286 | return (tmp - dst); | |
287 | } | |
288 | #endif | |
289 | ||
290 | static inline long | |
291 | strncpy_from_user(char *dst, const char __user *src, long count) | |
292 | { | |
a9ede5b3 | 293 | if (!access_ok(VERIFY_READ, src, 1)) |
eed417dd AB |
294 | return -EFAULT; |
295 | return __strncpy_from_user(dst, src, count); | |
296 | } | |
297 | ||
298 | /* | |
299 | * Return the size of a string (including the ending 0) | |
300 | * | |
301 | * Return 0 on exception, a value greater than N if too long | |
302 | */ | |
#ifndef __strnlen_user
/* +1 so the count includes the NUL; a result > n means "too long". */
#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
#endif
306 | ||
830f5800 MS |
307 | /* |
308 | * Unlike strnlen, strnlen_user includes the nul terminator in | |
309 | * its returned count. Callers should check for a returned value | |
310 | * greater than N as an indication the string is too long. | |
311 | */ | |
eed417dd AB |
312 | static inline long strnlen_user(const char __user *src, long n) |
313 | { | |
9844813f MF |
314 | if (!access_ok(VERIFY_READ, src, 1)) |
315 | return 0; | |
7f509a9e | 316 | return __strnlen_user(src, n); |
eed417dd | 317 | } |
eed417dd AB |
318 | |
319 | static inline long strlen_user(const char __user *src) | |
320 | { | |
321 | return strnlen_user(src, 32767); | |
322 | } | |
323 | ||
324 | /* | |
325 | * Zero Userspace | |
326 | */ | |
327 | #ifndef __clear_user | |
328 | static inline __must_check unsigned long | |
329 | __clear_user(void __user *to, unsigned long n) | |
330 | { | |
331 | memset((void __force *)to, 0, n); | |
332 | return 0; | |
333 | } | |
334 | #endif | |
335 | ||
336 | static inline __must_check unsigned long | |
337 | clear_user(void __user *to, unsigned long n) | |
338 | { | |
e0acd0bd | 339 | might_fault(); |
a9ede5b3 | 340 | if (!access_ok(VERIFY_WRITE, to, n)) |
eed417dd AB |
341 | return n; |
342 | ||
343 | return __clear_user(to, n); | |
344 | } | |
345 | ||
346 | #endif /* __ASM_GENERIC_UACCESS_H */ |