Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle | |
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | |
619b6e18 | 8 | * Copyright (C) 2007 Maciej W. Rozycki |
1da177e4 LT |
9 | */ |
10 | #ifndef _ASM_UACCESS_H | |
11 | #define _ASM_UACCESS_H | |
12 | ||
1da177e4 LT |
13 | #include <linux/kernel.h> |
14 | #include <linux/errno.h> | |
15 | #include <linux/thread_info.h> | |
1da177e4 LT |
16 | |
17 | /* | |
18 | * The fs value determines whether argument validity checking should be | |
19 | * performed or not. If get_fs() == USER_DS, checking is performed, with | |
20 | * get_fs() == KERNEL_DS, checking is bypassed. | |
21 | * | |
22 | * For historical reasons, these macros are grossly misnamed. | |
23 | */ | |
875d43e7 | 24 | #ifdef CONFIG_32BIT |
1da177e4 LT |
25 | |
26 | #define __UA_LIMIT 0x80000000UL | |
27 | ||
28 | #define __UA_ADDR ".word" | |
29 | #define __UA_LA "la" | |
30 | #define __UA_ADDU "addu" | |
31 | #define __UA_t0 "$8" | |
32 | #define __UA_t1 "$9" | |
33 | ||
875d43e7 | 34 | #endif /* CONFIG_32BIT */ |
1da177e4 | 35 | |
875d43e7 | 36 | #ifdef CONFIG_64BIT |
1da177e4 LT |
37 | |
38 | #define __UA_LIMIT (- TASK_SIZE) | |
39 | ||
40 | #define __UA_ADDR ".dword" | |
41 | #define __UA_LA "dla" | |
42 | #define __UA_ADDU "daddu" | |
43 | #define __UA_t0 "$12" | |
44 | #define __UA_t1 "$13" | |
45 | ||
875d43e7 | 46 | #endif /* CONFIG_64BIT */ |
1da177e4 LT |
47 | |
48 | /* | |
49 | * USER_DS is a bitmask that has the bits set that may not be set in a valid | |
50 | * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but | |
51 | * the arithmetic we're doing only works if the limit is a power of two, so | |
52 | * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid | |
53 | * address in this range it's the process's problem, not ours :-) | |
54 | */ | |
55 | ||
56 | #define KERNEL_DS ((mm_segment_t) { 0UL }) | |
57 | #define USER_DS ((mm_segment_t) { __UA_LIMIT }) | |
58 | ||
59 | #define VERIFY_READ 0 | |
60 | #define VERIFY_WRITE 1 | |
61 | ||
62 | #define get_ds() (KERNEL_DS) | |
63 | #define get_fs() (current_thread_info()->addr_limit) | |
64 | #define set_fs(x) (current_thread_info()->addr_limit = (x)) | |
65 | ||
21a151d8 | 66 | #define segment_eq(a, b) ((a).seg == (b).seg) |
1da177e4 LT |
67 | |
68 | ||
69 | /* | |
 70 | * Is an address valid? This does a straightforward calculation rather | |
71 | * than tests. | |
72 | * | |
73 | * Address valid if: | |
74 | * - "addr" doesn't have any high-bits set | |
75 | * - AND "size" doesn't have any high-bits set | |
76 | * - AND "addr+size" doesn't have any high-bits set | |
77 | * - OR we are in kernel mode. | |
78 | * | |
79 | * __ua_size() is a trick to avoid runtime checking of positive constant | |
80 | * sizes; for those we already know at compile time that the size is ok. | |
81 | */ | |
82 | #define __ua_size(size) \ | |
83 | ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size)) | |
84 | ||
85 | /* | |
86 | * access_ok: - Checks if a user space pointer is valid | |
87 | * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that | |
88 | * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe | |
89 | * to write to a block, it is always safe to read from it. | |
90 | * @addr: User space pointer to start of block to check | |
91 | * @size: Size of block to check | |
92 | * | |
93 | * Context: User context only. This function may sleep. | |
94 | * | |
95 | * Checks if a pointer to a block of memory in user space is valid. | |
96 | * | |
97 | * Returns true (nonzero) if the memory block may be valid, false (zero) | |
98 | * if it is definitely invalid. | |
99 | * | |
100 | * Note that, depending on architecture, this function probably just | |
101 | * checks that the pointer is in the user space range - after calling | |
102 | * this function, memory access functions may still return -EFAULT. | |
103 | */ | |
104 | ||
105 | #define __access_mask get_fs().seg | |
106 | ||
ed01b3d2 RB |
107 | #define __access_ok(addr, size, mask) \ |
108 | ({ \ | |
109 | unsigned long __addr = (unsigned long) (addr); \ | |
110 | unsigned long __size = size; \ | |
111 | unsigned long __mask = mask; \ | |
112 | unsigned long __ok; \ | |
113 | \ | |
114 | __chk_user_ptr(addr); \ | |
115 | __ok = (signed long)(__mask & (__addr | (__addr + __size) | \ | |
116 | __ua_size(__size))); \ | |
117 | __ok == 0; \ | |
d0aab922 | 118 | }) |
1da177e4 LT |
119 | |
120 | #define access_ok(type, addr, size) \ | |
d0aab922 | 121 | likely(__access_ok((addr), (size), __access_mask)) |
1da177e4 | 122 | |
1da177e4 LT |
123 | /* |
124 | * put_user: - Write a simple value into user space. | |
125 | * @x: Value to copy to user space. | |
126 | * @ptr: Destination address, in user space. | |
127 | * | |
128 | * Context: User context only. This function may sleep. | |
129 | * | |
130 | * This macro copies a single simple value from kernel space to user | |
131 | * space. It supports simple types like char and int, but not larger | |
132 | * data types like structures or arrays. | |
133 | * | |
134 | * @ptr must have pointer-to-simple-variable type, and @x must be assignable | |
135 | * to the result of dereferencing @ptr. | |
136 | * | |
137 | * Returns zero on success, or -EFAULT on error. | |
138 | */ | |
139 | #define put_user(x,ptr) \ | |
21a151d8 | 140 | __put_user_check((x), (ptr), sizeof(*(ptr))) |
1da177e4 LT |
141 | |
142 | /* | |
143 | * get_user: - Get a simple variable from user space. | |
144 | * @x: Variable to store result. | |
145 | * @ptr: Source address, in user space. | |
146 | * | |
147 | * Context: User context only. This function may sleep. | |
148 | * | |
149 | * This macro copies a single simple variable from user space to kernel | |
150 | * space. It supports simple types like char and int, but not larger | |
151 | * data types like structures or arrays. | |
152 | * | |
153 | * @ptr must have pointer-to-simple-variable type, and the result of | |
154 | * dereferencing @ptr must be assignable to @x without a cast. | |
155 | * | |
156 | * Returns zero on success, or -EFAULT on error. | |
157 | * On error, the variable @x is set to zero. | |
158 | */ | |
159 | #define get_user(x,ptr) \ | |
21a151d8 | 160 | __get_user_check((x), (ptr), sizeof(*(ptr))) |
1da177e4 LT |
161 | |
162 | /* | |
163 | * __put_user: - Write a simple value into user space, with less checking. | |
164 | * @x: Value to copy to user space. | |
165 | * @ptr: Destination address, in user space. | |
166 | * | |
167 | * Context: User context only. This function may sleep. | |
168 | * | |
169 | * This macro copies a single simple value from kernel space to user | |
170 | * space. It supports simple types like char and int, but not larger | |
171 | * data types like structures or arrays. | |
172 | * | |
173 | * @ptr must have pointer-to-simple-variable type, and @x must be assignable | |
174 | * to the result of dereferencing @ptr. | |
175 | * | |
176 | * Caller must check the pointer with access_ok() before calling this | |
177 | * function. | |
178 | * | |
179 | * Returns zero on success, or -EFAULT on error. | |
180 | */ | |
181 | #define __put_user(x,ptr) \ | |
21a151d8 | 182 | __put_user_nocheck((x), (ptr), sizeof(*(ptr))) |
1da177e4 LT |
183 | |
184 | /* | |
185 | * __get_user: - Get a simple variable from user space, with less checking. | |
186 | * @x: Variable to store result. | |
187 | * @ptr: Source address, in user space. | |
188 | * | |
189 | * Context: User context only. This function may sleep. | |
190 | * | |
191 | * This macro copies a single simple variable from user space to kernel | |
192 | * space. It supports simple types like char and int, but not larger | |
193 | * data types like structures or arrays. | |
194 | * | |
195 | * @ptr must have pointer-to-simple-variable type, and the result of | |
196 | * dereferencing @ptr must be assignable to @x without a cast. | |
197 | * | |
198 | * Caller must check the pointer with access_ok() before calling this | |
199 | * function. | |
200 | * | |
201 | * Returns zero on success, or -EFAULT on error. | |
202 | * On error, the variable @x is set to zero. | |
203 | */ | |
204 | #define __get_user(x,ptr) \ | |
21a151d8 | 205 | __get_user_nocheck((x), (ptr), sizeof(*(ptr))) |
1da177e4 LT |
206 | |
207 | struct __large_struct { unsigned long buf[100]; }; | |
fe00f943 | 208 | #define __m(x) (*(struct __large_struct __user *)(x)) |
1da177e4 LT |
209 | |
210 | /* | |
211 | * Yuck. We need two variants, one for 64bit operation and one | |
212 | * for 32 bit mode and old iron. | |
213 | */ | |
4feb8f8f RB |
214 | #ifdef CONFIG_32BIT |
215 | #define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr) | |
216 | #endif | |
217 | #ifdef CONFIG_64BIT | |
218 | #define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr) | |
1da177e4 LT |
219 | #endif |
220 | ||
4feb8f8f RB |
221 | extern void __get_user_unknown(void); |
222 | ||
223 | #define __get_user_common(val, size, ptr) \ | |
224 | do { \ | |
1da177e4 | 225 | switch (size) { \ |
4feb8f8f RB |
226 | case 1: __get_user_asm(val, "lb", ptr); break; \ |
227 | case 2: __get_user_asm(val, "lh", ptr); break; \ | |
228 | case 4: __get_user_asm(val, "lw", ptr); break; \ | |
229 | case 8: __GET_USER_DW(val, ptr); break; \ | |
1da177e4 LT |
230 | default: __get_user_unknown(); break; \ |
231 | } \ | |
4feb8f8f RB |
232 | } while (0) |
233 | ||
21a151d8 | 234 | #define __get_user_nocheck(x, ptr, size) \ |
4feb8f8f | 235 | ({ \ |
8d2d91e8 | 236 | int __gu_err; \ |
4feb8f8f | 237 | \ |
ed01b3d2 | 238 | __chk_user_ptr(ptr); \ |
4feb8f8f | 239 | __get_user_common((x), size, ptr); \ |
1da177e4 LT |
240 | __gu_err; \ |
241 | }) | |
242 | ||
21a151d8 | 243 | #define __get_user_check(x, ptr, size) \ |
1da177e4 | 244 | ({ \ |
8d2d91e8 | 245 | int __gu_err = -EFAULT; \ |
8ecbbcaf | 246 | const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ |
4feb8f8f | 247 | \ |
ef41f460 | 248 | might_fault(); \ |
4feb8f8f RB |
249 | if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ |
250 | __get_user_common((x), size, __gu_ptr); \ | |
1da177e4 | 251 | \ |
1da177e4 LT |
252 | __gu_err; \ |
253 | }) | |
254 | ||
4feb8f8f | 255 | #define __get_user_asm(val, insn, addr) \ |
fe00f943 | 256 | { \ |
4feb8f8f RB |
257 | long __gu_tmp; \ |
258 | \ | |
1da177e4 LT |
259 | __asm__ __volatile__( \ |
260 | "1: " insn " %1, %3 \n" \ | |
261 | "2: \n" \ | |
262 | " .section .fixup,\"ax\" \n" \ | |
263 | "3: li %0, %4 \n" \ | |
264 | " j 2b \n" \ | |
265 | " .previous \n" \ | |
266 | " .section __ex_table,\"a\" \n" \ | |
267 | " "__UA_ADDR "\t1b, 3b \n" \ | |
268 | " .previous \n" \ | |
4feb8f8f | 269 | : "=r" (__gu_err), "=r" (__gu_tmp) \ |
fe00f943 | 270 | : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \ |
4feb8f8f | 271 | \ |
8ecbbcaf | 272 | (val) = (__typeof__(*(addr))) __gu_tmp; \ |
fe00f943 | 273 | } |
1da177e4 LT |
274 | |
275 | /* | |
276 | * Get a long long 64 using 32 bit registers. | |
277 | */ | |
4feb8f8f | 278 | #define __get_user_asm_ll32(val, addr) \ |
fe00f943 | 279 | { \ |
cb66fb3f RB |
280 | union { \ |
281 | unsigned long long l; \ | |
282 | __typeof__(*(addr)) t; \ | |
283 | } __gu_tmp; \ | |
cd1fb9ea | 284 | \ |
1da177e4 | 285 | __asm__ __volatile__( \ |
fe00f943 RB |
286 | "1: lw %1, (%3) \n" \ |
287 | "2: lw %D1, 4(%3) \n" \ | |
1da177e4 | 288 | "3: .section .fixup,\"ax\" \n" \ |
fe00f943 | 289 | "4: li %0, %4 \n" \ |
1da177e4 LT |
290 | " move %1, $0 \n" \ |
291 | " move %D1, $0 \n" \ | |
292 | " j 3b \n" \ | |
293 | " .previous \n" \ | |
294 | " .section __ex_table,\"a\" \n" \ | |
295 | " " __UA_ADDR " 1b, 4b \n" \ | |
296 | " " __UA_ADDR " 2b, 4b \n" \ | |
297 | " .previous \n" \ | |
cb66fb3f | 298 | : "=r" (__gu_err), "=&r" (__gu_tmp.l) \ |
fe00f943 | 299 | : "0" (0), "r" (addr), "i" (-EFAULT)); \ |
cb66fb3f RB |
300 | \ |
301 | (val) = __gu_tmp.t; \ | |
fe00f943 | 302 | } |
1da177e4 | 303 | |
1da177e4 LT |
304 | /* |
305 | * Yuck. We need two variants, one for 64bit operation and one | |
306 | * for 32 bit mode and old iron. | |
307 | */ | |
4feb8f8f | 308 | #ifdef CONFIG_32BIT |
fe00f943 | 309 | #define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr) |
1da177e4 | 310 | #endif |
4feb8f8f RB |
311 | #ifdef CONFIG_64BIT |
312 | #define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr) | |
313 | #endif | |
1da177e4 | 314 | |
21a151d8 | 315 | #define __put_user_nocheck(x, ptr, size) \ |
1da177e4 LT |
316 | ({ \ |
317 | __typeof__(*(ptr)) __pu_val; \ | |
8d2d91e8 | 318 | int __pu_err = 0; \ |
1da177e4 | 319 | \ |
ed01b3d2 | 320 | __chk_user_ptr(ptr); \ |
1da177e4 | 321 | __pu_val = (x); \ |
1da177e4 | 322 | switch (size) { \ |
fe00f943 RB |
323 | case 1: __put_user_asm("sb", ptr); break; \ |
324 | case 2: __put_user_asm("sh", ptr); break; \ | |
325 | case 4: __put_user_asm("sw", ptr); break; \ | |
326 | case 8: __PUT_USER_DW(ptr); break; \ | |
1da177e4 LT |
327 | default: __put_user_unknown(); break; \ |
328 | } \ | |
329 | __pu_err; \ | |
330 | }) | |
331 | ||
21a151d8 | 332 | #define __put_user_check(x, ptr, size) \ |
1da177e4 | 333 | ({ \ |
fe00f943 RB |
334 | __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ |
335 | __typeof__(*(ptr)) __pu_val = (x); \ | |
8d2d91e8 | 336 | int __pu_err = -EFAULT; \ |
1da177e4 | 337 | \ |
ef41f460 | 338 | might_fault(); \ |
fe00f943 | 339 | if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ |
1da177e4 | 340 | switch (size) { \ |
fe00f943 RB |
341 | case 1: __put_user_asm("sb", __pu_addr); break; \ |
342 | case 2: __put_user_asm("sh", __pu_addr); break; \ | |
343 | case 4: __put_user_asm("sw", __pu_addr); break; \ | |
344 | case 8: __PUT_USER_DW(__pu_addr); break; \ | |
1da177e4 LT |
345 | default: __put_user_unknown(); break; \ |
346 | } \ | |
347 | } \ | |
348 | __pu_err; \ | |
349 | }) | |
350 | ||
fe00f943 RB |
351 | #define __put_user_asm(insn, ptr) \ |
352 | { \ | |
1da177e4 LT |
353 | __asm__ __volatile__( \ |
354 | "1: " insn " %z2, %3 # __put_user_asm\n" \ | |
355 | "2: \n" \ | |
356 | " .section .fixup,\"ax\" \n" \ | |
357 | "3: li %0, %4 \n" \ | |
358 | " j 2b \n" \ | |
359 | " .previous \n" \ | |
360 | " .section __ex_table,\"a\" \n" \ | |
361 | " " __UA_ADDR " 1b, 3b \n" \ | |
362 | " .previous \n" \ | |
363 | : "=r" (__pu_err) \ | |
fe00f943 | 364 | : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \ |
1da177e4 | 365 | "i" (-EFAULT)); \ |
fe00f943 | 366 | } |
1da177e4 | 367 | |
fe00f943 RB |
368 | #define __put_user_asm_ll32(ptr) \ |
369 | { \ | |
1da177e4 | 370 | __asm__ __volatile__( \ |
fe00f943 RB |
371 | "1: sw %2, (%3) # __put_user_asm_ll32 \n" \ |
372 | "2: sw %D2, 4(%3) \n" \ | |
1da177e4 LT |
373 | "3: \n" \ |
374 | " .section .fixup,\"ax\" \n" \ | |
fe00f943 | 375 | "4: li %0, %4 \n" \ |
1da177e4 LT |
376 | " j 3b \n" \ |
377 | " .previous \n" \ | |
378 | " .section __ex_table,\"a\" \n" \ | |
379 | " " __UA_ADDR " 1b, 4b \n" \ | |
380 | " " __UA_ADDR " 2b, 4b \n" \ | |
381 | " .previous" \ | |
382 | : "=r" (__pu_err) \ | |
fe00f943 RB |
383 | : "0" (0), "r" (__pu_val), "r" (ptr), \ |
384 | "i" (-EFAULT)); \ | |
385 | } | |
1da177e4 LT |
386 | |
387 | extern void __put_user_unknown(void); | |
388 | ||
71ec6ccf RB |
389 | /* |
390 | * put_user_unaligned: - Write a simple value into user space. | |
391 | * @x: Value to copy to user space. | |
392 | * @ptr: Destination address, in user space. | |
393 | * | |
394 | * Context: User context only. This function may sleep. | |
395 | * | |
396 | * This macro copies a single simple value from kernel space to user | |
397 | * space. It supports simple types like char and int, but not larger | |
398 | * data types like structures or arrays. | |
399 | * | |
400 | * @ptr must have pointer-to-simple-variable type, and @x must be assignable | |
401 | * to the result of dereferencing @ptr. | |
402 | * | |
403 | * Returns zero on success, or -EFAULT on error. | |
404 | */ | |
405 | #define put_user_unaligned(x,ptr) \ | |
406 | __put_user_unaligned_check((x),(ptr),sizeof(*(ptr))) | |
407 | ||
408 | /* | |
409 | * get_user_unaligned: - Get a simple variable from user space. | |
410 | * @x: Variable to store result. | |
411 | * @ptr: Source address, in user space. | |
412 | * | |
413 | * Context: User context only. This function may sleep. | |
414 | * | |
415 | * This macro copies a single simple variable from user space to kernel | |
416 | * space. It supports simple types like char and int, but not larger | |
417 | * data types like structures or arrays. | |
418 | * | |
419 | * @ptr must have pointer-to-simple-variable type, and the result of | |
420 | * dereferencing @ptr must be assignable to @x without a cast. | |
421 | * | |
422 | * Returns zero on success, or -EFAULT on error. | |
423 | * On error, the variable @x is set to zero. | |
424 | */ | |
425 | #define get_user_unaligned(x,ptr) \ | |
426 | __get_user_unaligned_check((x),(ptr),sizeof(*(ptr))) | |
427 | ||
428 | /* | |
429 | * __put_user_unaligned: - Write a simple value into user space, with less checking. | |
430 | * @x: Value to copy to user space. | |
431 | * @ptr: Destination address, in user space. | |
432 | * | |
433 | * Context: User context only. This function may sleep. | |
434 | * | |
435 | * This macro copies a single simple value from kernel space to user | |
436 | * space. It supports simple types like char and int, but not larger | |
437 | * data types like structures or arrays. | |
438 | * | |
439 | * @ptr must have pointer-to-simple-variable type, and @x must be assignable | |
440 | * to the result of dereferencing @ptr. | |
441 | * | |
442 | * Caller must check the pointer with access_ok() before calling this | |
443 | * function. | |
444 | * | |
445 | * Returns zero on success, or -EFAULT on error. | |
446 | */ | |
447 | #define __put_user_unaligned(x,ptr) \ | |
448 | __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr))) | |
449 | ||
450 | /* | |
451 | * __get_user_unaligned: - Get a simple variable from user space, with less checking. | |
452 | * @x: Variable to store result. | |
453 | * @ptr: Source address, in user space. | |
454 | * | |
455 | * Context: User context only. This function may sleep. | |
456 | * | |
457 | * This macro copies a single simple variable from user space to kernel | |
458 | * space. It supports simple types like char and int, but not larger | |
459 | * data types like structures or arrays. | |
460 | * | |
461 | * @ptr must have pointer-to-simple-variable type, and the result of | |
462 | * dereferencing @ptr must be assignable to @x without a cast. | |
463 | * | |
464 | * Caller must check the pointer with access_ok() before calling this | |
465 | * function. | |
466 | * | |
467 | * Returns zero on success, or -EFAULT on error. | |
468 | * On error, the variable @x is set to zero. | |
469 | */ | |
/*
 * Fix: the original expanded to __get_user__unalignednocheck(), a
 * misspelling of __get_user_unaligned_nocheck() (defined below), so any
 * use of __get_user_unaligned() failed to build.
 */
#define __get_user_unaligned(x,ptr) \
	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
472 | ||
473 | /* | |
474 | * Yuck. We need two variants, one for 64bit operation and one | |
475 | * for 32 bit mode and old iron. | |
476 | */ | |
477 | #ifdef CONFIG_32BIT | |
478 | #define __GET_USER_UNALIGNED_DW(val, ptr) \ | |
479 | __get_user_unaligned_asm_ll32(val, ptr) | |
480 | #endif | |
481 | #ifdef CONFIG_64BIT | |
482 | #define __GET_USER_UNALIGNED_DW(val, ptr) \ | |
483 | __get_user_unaligned_asm(val, "uld", ptr) | |
484 | #endif | |
485 | ||
486 | extern void __get_user_unaligned_unknown(void); | |
487 | ||
488 | #define __get_user_unaligned_common(val, size, ptr) \ | |
489 | do { \ | |
490 | switch (size) { \ | |
491 | case 1: __get_user_asm(val, "lb", ptr); break; \ | |
492 | case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \ | |
493 | case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \ | |
494 | case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \ | |
495 | default: __get_user_unaligned_unknown(); break; \ | |
496 | } \ | |
497 | } while (0) | |
498 | ||
499 | #define __get_user_unaligned_nocheck(x,ptr,size) \ | |
500 | ({ \ | |
501 | int __gu_err; \ | |
502 | \ | |
503 | __get_user_unaligned_common((x), size, ptr); \ | |
504 | __gu_err; \ | |
505 | }) | |
506 | ||
507 | #define __get_user_unaligned_check(x,ptr,size) \ | |
508 | ({ \ | |
509 | int __gu_err = -EFAULT; \ | |
510 | const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ | |
511 | \ | |
512 | if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ | |
513 | __get_user_unaligned_common((x), size, __gu_ptr); \ | |
514 | \ | |
515 | __gu_err; \ | |
516 | }) | |
517 | ||
518 | #define __get_user_unaligned_asm(val, insn, addr) \ | |
519 | { \ | |
520 | long __gu_tmp; \ | |
521 | \ | |
522 | __asm__ __volatile__( \ | |
523 | "1: " insn " %1, %3 \n" \ | |
524 | "2: \n" \ | |
525 | " .section .fixup,\"ax\" \n" \ | |
526 | "3: li %0, %4 \n" \ | |
527 | " j 2b \n" \ | |
528 | " .previous \n" \ | |
529 | " .section __ex_table,\"a\" \n" \ | |
530 | " "__UA_ADDR "\t1b, 3b \n" \ | |
531 | " "__UA_ADDR "\t1b + 4, 3b \n" \ | |
532 | " .previous \n" \ | |
533 | : "=r" (__gu_err), "=r" (__gu_tmp) \ | |
534 | : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \ | |
535 | \ | |
536 | (val) = (__typeof__(*(addr))) __gu_tmp; \ | |
537 | } | |
538 | ||
539 | /* | |
540 | * Get a long long 64 using 32 bit registers. | |
541 | */ | |
542 | #define __get_user_unaligned_asm_ll32(val, addr) \ | |
543 | { \ | |
544 | unsigned long long __gu_tmp; \ | |
545 | \ | |
546 | __asm__ __volatile__( \ | |
547 | "1: ulw %1, (%3) \n" \ | |
548 | "2: ulw %D1, 4(%3) \n" \ | |
549 | " move %0, $0 \n" \ | |
550 | "3: .section .fixup,\"ax\" \n" \ | |
551 | "4: li %0, %4 \n" \ | |
552 | " move %1, $0 \n" \ | |
553 | " move %D1, $0 \n" \ | |
554 | " j 3b \n" \ | |
555 | " .previous \n" \ | |
556 | " .section __ex_table,\"a\" \n" \ | |
557 | " " __UA_ADDR " 1b, 4b \n" \ | |
558 | " " __UA_ADDR " 1b + 4, 4b \n" \ | |
559 | " " __UA_ADDR " 2b, 4b \n" \ | |
560 | " " __UA_ADDR " 2b + 4, 4b \n" \ | |
561 | " .previous \n" \ | |
562 | : "=r" (__gu_err), "=&r" (__gu_tmp) \ | |
563 | : "0" (0), "r" (addr), "i" (-EFAULT)); \ | |
564 | (val) = (__typeof__(*(addr))) __gu_tmp; \ | |
565 | } | |
566 | ||
567 | /* | |
568 | * Yuck. We need two variants, one for 64bit operation and one | |
569 | * for 32 bit mode and old iron. | |
570 | */ | |
571 | #ifdef CONFIG_32BIT | |
572 | #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr) | |
573 | #endif | |
574 | #ifdef CONFIG_64BIT | |
575 | #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr) | |
576 | #endif | |
577 | ||
578 | #define __put_user_unaligned_nocheck(x,ptr,size) \ | |
579 | ({ \ | |
580 | __typeof__(*(ptr)) __pu_val; \ | |
581 | int __pu_err = 0; \ | |
582 | \ | |
583 | __pu_val = (x); \ | |
584 | switch (size) { \ | |
585 | case 1: __put_user_asm("sb", ptr); break; \ | |
586 | case 2: __put_user_unaligned_asm("ush", ptr); break; \ | |
587 | case 4: __put_user_unaligned_asm("usw", ptr); break; \ | |
588 | case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \ | |
589 | default: __put_user_unaligned_unknown(); break; \ | |
590 | } \ | |
591 | __pu_err; \ | |
592 | }) | |
593 | ||
/*
 * __put_user_unaligned_check() - checked store of a possibly unaligned
 * user-space value; backs put_user_unaligned().
 *
 * @x:    value to store
 * @ptr:  user-space destination
 * @size: sizeof(*(ptr)), selects the store instruction
 *
 * Validates @ptr with access_ok() first; returns 0 on success or
 * -EFAULT if the range is invalid or the store faults.
 *
 * Fix: the case 8 arm invoked __PUT_USER_UNALGINED_DW (transposed
 * letters) instead of __PUT_USER_UNALIGNED_DW defined above, so any
 * 8-byte put_user_unaligned() failed to build.
 */
#define __put_user_unaligned_check(x,ptr,size)				\
({									\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	int __pu_err = -EFAULT;						\
									\
	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) {		\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_addr); break;		\
		case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
		case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
		case 8: __PUT_USER_UNALIGNED_DW(__pu_addr); break;	\
		default: __put_user_unaligned_unknown(); break;		\
		}							\
	}								\
	__pu_err;							\
})
611 | ||
612 | #define __put_user_unaligned_asm(insn, ptr) \ | |
613 | { \ | |
614 | __asm__ __volatile__( \ | |
615 | "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \ | |
616 | "2: \n" \ | |
617 | " .section .fixup,\"ax\" \n" \ | |
618 | "3: li %0, %4 \n" \ | |
619 | " j 2b \n" \ | |
620 | " .previous \n" \ | |
621 | " .section __ex_table,\"a\" \n" \ | |
622 | " " __UA_ADDR " 1b, 3b \n" \ | |
623 | " .previous \n" \ | |
624 | : "=r" (__pu_err) \ | |
625 | : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \ | |
626 | "i" (-EFAULT)); \ | |
627 | } | |
628 | ||
629 | #define __put_user_unaligned_asm_ll32(ptr) \ | |
630 | { \ | |
631 | __asm__ __volatile__( \ | |
632 | "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \ | |
633 | "2: sw %D2, 4(%3) \n" \ | |
634 | "3: \n" \ | |
635 | " .section .fixup,\"ax\" \n" \ | |
636 | "4: li %0, %4 \n" \ | |
637 | " j 3b \n" \ | |
638 | " .previous \n" \ | |
639 | " .section __ex_table,\"a\" \n" \ | |
640 | " " __UA_ADDR " 1b, 4b \n" \ | |
641 | " " __UA_ADDR " 1b + 4, 4b \n" \ | |
642 | " " __UA_ADDR " 2b, 4b \n" \ | |
643 | " " __UA_ADDR " 2b + 4, 4b \n" \ | |
644 | " .previous" \ | |
645 | : "=r" (__pu_err) \ | |
646 | : "0" (0), "r" (__pu_val), "r" (ptr), \ | |
647 | "i" (-EFAULT)); \ | |
648 | } | |
649 | ||
650 | extern void __put_user_unaligned_unknown(void); | |
651 | ||
1da177e4 LT |
652 | /* |
653 | * We're generating jump to subroutines which will be outside the range of | |
654 | * jump instructions | |
655 | */ | |
656 | #ifdef MODULE | |
657 | #define __MODULE_JAL(destination) \ | |
658 | ".set\tnoat\n\t" \ | |
659 | __UA_LA "\t$1, " #destination "\n\t" \ | |
660 | "jalr\t$1\n\t" \ | |
661 | ".set\tat\n\t" | |
662 | #else | |
663 | #define __MODULE_JAL(destination) \ | |
664 | "jal\t" #destination "\n\t" | |
665 | #endif | |
666 | ||
619b6e18 MR |
667 | #ifndef CONFIG_CPU_DADDI_WORKAROUNDS |
668 | #define DADDI_SCRATCH "$0" | |
669 | #else | |
670 | #define DADDI_SCRATCH "$3" | |
671 | #endif | |
672 | ||
1da177e4 LT |
673 | extern size_t __copy_user(void *__to, const void *__from, size_t __n); |
674 | ||
21a151d8 | 675 | #define __invoke_copy_to_user(to, from, n) \ |
1da177e4 | 676 | ({ \ |
49a89efb RB |
677 | register void __user *__cu_to_r __asm__("$4"); \ |
678 | register const void *__cu_from_r __asm__("$5"); \ | |
679 | register long __cu_len_r __asm__("$6"); \ | |
1da177e4 LT |
680 | \ |
681 | __cu_to_r = (to); \ | |
682 | __cu_from_r = (from); \ | |
683 | __cu_len_r = (n); \ | |
684 | __asm__ __volatile__( \ | |
685 | __MODULE_JAL(__copy_user) \ | |
686 | : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ | |
687 | : \ | |
688 | : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ | |
619b6e18 | 689 | DADDI_SCRATCH, "memory"); \ |
1da177e4 LT |
690 | __cu_len_r; \ |
691 | }) | |
692 | ||
693 | /* | |
694 | * __copy_to_user: - Copy a block of data into user space, with less checking. | |
695 | * @to: Destination address, in user space. | |
696 | * @from: Source address, in kernel space. | |
697 | * @n: Number of bytes to copy. | |
698 | * | |
699 | * Context: User context only. This function may sleep. | |
700 | * | |
701 | * Copy data from kernel space to user space. Caller must check | |
702 | * the specified block with access_ok() before calling this function. | |
703 | * | |
704 | * Returns number of bytes that could not be copied. | |
705 | * On success, this will be zero. | |
706 | */ | |
21a151d8 | 707 | #define __copy_to_user(to, from, n) \ |
1da177e4 | 708 | ({ \ |
fe00f943 | 709 | void __user *__cu_to; \ |
1da177e4 LT |
710 | const void *__cu_from; \ |
711 | long __cu_len; \ | |
712 | \ | |
1da177e4 LT |
713 | __cu_to = (to); \ |
714 | __cu_from = (from); \ | |
715 | __cu_len = (n); \ | |
ef41f460 | 716 | might_fault(); \ |
1da177e4 LT |
717 | __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ |
718 | __cu_len; \ | |
719 | }) | |
720 | ||
d0c91ae2 RB |
721 | extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); |
722 | ||
21a151d8 | 723 | #define __copy_to_user_inatomic(to, from, n) \ |
e03b5269 RB |
724 | ({ \ |
725 | void __user *__cu_to; \ | |
726 | const void *__cu_from; \ | |
727 | long __cu_len; \ | |
728 | \ | |
729 | __cu_to = (to); \ | |
730 | __cu_from = (from); \ | |
731 | __cu_len = (n); \ | |
732 | __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ | |
733 | __cu_len; \ | |
734 | }) | |
735 | ||
21a151d8 | 736 | #define __copy_from_user_inatomic(to, from, n) \ |
e03b5269 RB |
737 | ({ \ |
738 | void *__cu_to; \ | |
739 | const void __user *__cu_from; \ | |
740 | long __cu_len; \ | |
741 | \ | |
742 | __cu_to = (to); \ | |
743 | __cu_from = (from); \ | |
744 | __cu_len = (n); \ | |
745 | __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \ | |
746 | __cu_len); \ | |
747 | __cu_len; \ | |
748 | }) | |
1da177e4 LT |
749 | |
750 | /* | |
751 | * copy_to_user: - Copy a block of data into user space. | |
752 | * @to: Destination address, in user space. | |
753 | * @from: Source address, in kernel space. | |
754 | * @n: Number of bytes to copy. | |
755 | * | |
756 | * Context: User context only. This function may sleep. | |
757 | * | |
758 | * Copy data from kernel space to user space. | |
759 | * | |
760 | * Returns number of bytes that could not be copied. | |
761 | * On success, this will be zero. | |
762 | */ | |
21a151d8 | 763 | #define copy_to_user(to, from, n) \ |
1da177e4 | 764 | ({ \ |
fe00f943 | 765 | void __user *__cu_to; \ |
1da177e4 LT |
766 | const void *__cu_from; \ |
767 | long __cu_len; \ | |
768 | \ | |
1da177e4 LT |
769 | __cu_to = (to); \ |
770 | __cu_from = (from); \ | |
771 | __cu_len = (n); \ | |
ef41f460 RB |
772 | if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \ |
773 | might_fault(); \ | |
1da177e4 LT |
774 | __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ |
775 | __cu_len); \ | |
ef41f460 | 776 | } \ |
1da177e4 LT |
777 | __cu_len; \ |
778 | }) | |
779 | ||
21a151d8 | 780 | #define __invoke_copy_from_user(to, from, n) \ |
1da177e4 | 781 | ({ \ |
49a89efb RB |
782 | register void *__cu_to_r __asm__("$4"); \ |
783 | register const void __user *__cu_from_r __asm__("$5"); \ | |
784 | register long __cu_len_r __asm__("$6"); \ | |
1da177e4 LT |
785 | \ |
786 | __cu_to_r = (to); \ | |
787 | __cu_from_r = (from); \ | |
788 | __cu_len_r = (n); \ | |
789 | __asm__ __volatile__( \ | |
790 | ".set\tnoreorder\n\t" \ | |
791 | __MODULE_JAL(__copy_user) \ | |
792 | ".set\tnoat\n\t" \ | |
793 | __UA_ADDU "\t$1, %1, %2\n\t" \ | |
794 | ".set\tat\n\t" \ | |
795 | ".set\treorder" \ | |
796 | : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ | |
797 | : \ | |
e03b5269 | 798 | : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ |
619b6e18 | 799 | DADDI_SCRATCH, "memory"); \ |
e03b5269 RB |
800 | __cu_len_r; \ |
801 | }) | |
802 | ||
21a151d8 | 803 | #define __invoke_copy_from_user_inatomic(to, from, n) \ |
e03b5269 | 804 | ({ \ |
49a89efb RB |
805 | register void *__cu_to_r __asm__("$4"); \ |
806 | register const void __user *__cu_from_r __asm__("$5"); \ | |
807 | register long __cu_len_r __asm__("$6"); \ | |
e03b5269 RB |
808 | \ |
809 | __cu_to_r = (to); \ | |
810 | __cu_from_r = (from); \ | |
811 | __cu_len_r = (n); \ | |
812 | __asm__ __volatile__( \ | |
813 | ".set\tnoreorder\n\t" \ | |
814 | __MODULE_JAL(__copy_user_inatomic) \ | |
815 | ".set\tnoat\n\t" \ | |
816 | __UA_ADDU "\t$1, %1, %2\n\t" \ | |
817 | ".set\tat\n\t" \ | |
818 | ".set\treorder" \ | |
819 | : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ | |
820 | : \ | |
1da177e4 | 821 | : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ |
619b6e18 | 822 | DADDI_SCRATCH, "memory"); \ |
1da177e4 LT |
823 | __cu_len_r; \ |
824 | }) | |
825 | ||
/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
/*
 * Implementation note: each argument is evaluated exactly once into a
 * local, might_fault() marks the possible sleeping page fault, and the
 * copy itself is delegated to the assembler routine behind
 * __invoke_copy_from_user(); the macro evaluates to that routine's
 * uncopied-byte count.
 */
#define __copy_from_user(to, from, n)				\
({								\
	void *__cu_to;						\
	const void __user *__cu_from;				\
	long __cu_len;						\
								\
	__cu_to = (to);						\
	__cu_from = (from);					\
	__cu_len = (n);						\
	might_fault();						\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
					   __cu_len);		\
	__cu_len;						\
})
857 | ||
/*
 * copy_from_user: - Copy a block of data from user space.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
/*
 * Implementation note: the range is validated with
 * access_ok(VERIFY_READ) first.  If that fails the copy is skipped
 * entirely and the macro evaluates to @n, i.e. every byte is reported
 * as uncopied; otherwise it evaluates to the helper's uncopied-byte
 * count (0 on success).
 */
#define copy_from_user(to, from, n)				\
({								\
	void *__cu_to;						\
	const void __user *__cu_from;				\
	long __cu_len;						\
								\
	__cu_to = (to);						\
	__cu_from = (from);					\
	__cu_len = (n);						\
	if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\
		might_fault();					\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);	\
	}							\
	__cu_len;						\
})
890 | ||
ed01b3d2 RB |
/*
 * __copy_in_user() - copy a block of data between two userspace
 * buffers without access_ok() checks; the caller must validate both
 * ranges beforehand.  May sleep (might_fault()), so user context
 * only.  Evaluates to the number of bytes that could not be copied,
 * 0 on success.
 *
 * NOTE(review): this delegates to __invoke_copy_from_user, i.e. the
 * same __copy_user routine used for user->kernel copies; presumably
 * its exception fixups cover faults through either pointer -- confirm
 * against the __copy_user assembler implementation.
 */
#define __copy_in_user(to, from, n)				\
({								\
	void __user *__cu_to;					\
	const void __user *__cu_from;				\
	long __cu_len;						\
								\
	__cu_to = (to);						\
	__cu_from = (from);					\
	__cu_len = (n);						\
	might_fault();						\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
					   __cu_len);		\
	__cu_len;						\
})
1da177e4 | 905 | |
/*
 * copy_in_user() - checked copy between two userspace buffers.
 * @to: Destination address, in user space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Both ranges are validated first: @from with access_ok(VERIFY_READ)
 * and @to with access_ok(VERIFY_WRITE).  If either check fails the
 * copy is skipped and the macro evaluates to @n (all bytes reported
 * as uncopied); otherwise it evaluates to the number of bytes left
 * uncopied, 0 on success.  May sleep, so user context only.
 */
#define copy_in_user(to, from, n)				\
({								\
	void __user *__cu_to;					\
	const void __user *__cu_from;				\
	long __cu_len;						\
								\
	__cu_to = (to);						\
	__cu_from = (from);					\
	__cu_len = (n);						\
	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&	\
		   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {	\
		might_fault();					\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);	\
	}							\
	__cu_len;						\
})
923 | ||
/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @addr: Destination address, in user space.
 * @size: Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 *
 * Implementation: marshals @addr into $4 and @size into $6 and calls
 * the assembler __bzero helper, which leaves the count of bytes it
 * could not clear in $6.  $5 is forced to zero -- presumably the fill
 * value or a flag the helper expects; confirm against the __bzero
 * assembler source.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_fault();	/* may sleep on a page fault */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}
953 | ||
/*
 * clear_user: - Zero a block of memory in user space, with checking.
 * @addr: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Validates the range with access_ok(VERIFY_WRITE) before clearing.
 * A zero @n, or a failed check, skips the call entirely, so the macro
 * then evaluates to @n unchanged (all bytes reported as uncleared;
 * trivially 0 when @n == 0).  Otherwise it evaluates to the number of
 * bytes __clear_user() could not zero; 0 on success.
 */
#define clear_user(addr,n)					\
({								\
	void __user * __cl_addr = (addr);			\
	unsigned long __cl_size = (n);				\
	if (__cl_size && access_ok(VERIFY_WRITE,		\
		((unsigned long)(__cl_addr)), __cl_size))	\
		__cl_size = __clear_user(__cl_addr, __cl_size);	\
	__cl_size;						\
})
963 | ||
/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @__to: Destination address, in kernel space.  This buffer must be at
 *        least @__len bytes long.
 * @__from: Source address, in user space.
 * @__len: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @__len is smaller than the length of the string, copies @__len bytes
 * and returns @__len.
 *
 * Implementation: arguments are moved into $4/$5/$6 for the
 * __strncpy_from_user_nocheck_asm helper; its result comes back in
 * $2 (v0).  "memory" is clobbered because the helper writes @__to
 * behind the compiler's back.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();	/* may sleep on a page fault */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
1002 | ||
/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @__to: Destination address, in kernel space.  This buffer must be at
 *        least @__len bytes long.
 * @__from: Source address, in user space.
 * @__len: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @__len is smaller than the length of the string, copies @__len bytes
 * and returns @__len.
 *
 * Implementation: same register convention as __strncpy_from_user(),
 * but calls the checking __strncpy_from_user_asm helper -- which, by
 * contrast with the _nocheck_ variant above, presumably validates the
 * user range itself; confirm against the assembler source.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	might_fault();	/* may sleep on a page fault */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}
1039 | ||
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/*
 * __strlen_user() - unchecked strlen() of a userspace string; the
 * caller is responsible for validating @s with access_ok().  Passes
 * @s in $4 to the __strlen_user_nocheck_asm helper and returns its
 * result from $2 (v0).  May sleep on a page fault.
 */
static inline long __strlen_user(const char __user *s)
{
	long res;

	might_fault();	/* may sleep on a page fault */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}
1056 | ||
/*
 * strlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 *
 * Implementation: passes @s in $4 to the checking __strlen_user_asm
 * helper; the result comes back in $2 (v0).
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	might_fault();	/* may sleep on a page fault */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}
1086 | ||
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
/*
 * __strnlen_user() - unchecked strnlen() of a userspace string,
 * scanning at most @n bytes; the caller is responsible for validating
 * @s with access_ok().  Passes @s in $4 and @n in $5 to the
 * __strnlen_user_nocheck_asm helper and returns its result from
 * $2 (v0).  May sleep on a page fault.
 */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();	/* may sleep on a page fault */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
1104 | ||
/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @s: The string to measure.
 * @n: The maximum number of bytes to scan.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space, scanning at
 * most @n bytes.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 */
/*
 * Implementation: passes @s in $4 and @n in $5 to the checking
 * __strnlen_user_asm helper; the result comes back in $2 (v0).
 * (The previous header comment was a copy-paste of strlen_user()'s,
 * with the wrong name and no @n parameter -- fixed above.)
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();	/* may sleep on a page fault */
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}
1135 | ||
/*
 * One entry of the exception table consumed by fixup_exception():
 * if a fault is taken at address 'insn', execution resumes at the
 * fixup address 'nextinsn'.  Field order is ABI: the table is built
 * by assembler __ex_table directives elsewhere, so do not reorder.
 */
struct exception_table_entry {
	unsigned long insn;	/* address of the potentially faulting insn */
	unsigned long nextinsn;	/* address of the fixup code to jump to */
};
1141 | ||
1142 | extern int fixup_exception(struct pt_regs *regs); | |
1143 | ||
1144 | #endif /* _ASM_UACCESS_H */ |