include/asm-generic/uaccess.h
#ifndef __ASM_GENERIC_UACCESS_H
#define __ASM_GENERIC_UACCESS_H

/*
 * User space memory access functions; these should work
 * on any machine that has kernel and user data in the same
 * address space, e.g. all NOMMU machines.
 */
#include <linux/sched.h>
#include <linux/string.h>

#include <asm/segment.h>

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifndef KERNEL_DS
#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#endif

#ifndef USER_DS
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#ifndef get_fs
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
#endif
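
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * the classic pattern for temporarily widening the address limit so
 * that kernel buffers can be handed to routines expecting __user
 * pointers:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... call helpers that take __user pointers on kernel memory ...
 *	set_fs(old_fs);
 */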

#ifndef segment_eq
#define segment_eq(a, b) ((a).seg == (b).seg)
#endif

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))

/*
 * The architecture should really override this if possible, at least
 * doing a check against the address limit returned by get_fs().
 */
#ifndef __access_ok
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
#endif
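
/*
 * Illustrative sketch (hypothetical, not part of this header): an
 * architecture-specific override would typically compare the range
 * against the current limit, e.g. in its asm/uaccess.h before this
 * file is included, assuming mm_segment_t has the .seg member used by
 * segment_eq() above:
 *
 *	#define __access_ok(addr, size)				\
 *		(((addr) + (size)) <= get_fs().seg)
 *
 * A real implementation must also guard against wrap-around of
 * (addr + size); this simplified form ignores that.
 */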

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);
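
/*
 * Illustrative sketch (hypothetical, not part of this header): on a
 * fault inside a user-access routine, an architecture's fault handler
 * looks up the faulting instruction address and, if a fixup exists,
 * resumes there instead of killing the task:
 *
 *	unsigned long fixup = search_exception_table(regs->pc);
 *
 *	if (fixup) {
 *		regs->pc = fixup;
 *		return;
 *	}
 *
 * The register name (regs->pc) is architecture-specific and is used
 * here only for illustration.
 */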


/*
 * architectures with an MMU should override these two
 */
#ifndef __copy_from_user
static inline __must_check long __copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1:
			*(u8 *)to = *(u8 __force *)from;
			return 0;
		case 2:
			*(u16 *)to = *(u16 __force *)from;
			return 0;
		case 4:
			*(u32 *)to = *(u32 __force *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 *)to = *(u64 __force *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy(to, (const void __force *)from, n);
	return 0;
}
#endif

#ifndef __copy_to_user
static inline __must_check long __copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1:
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 2:
			*(u16 __force *)to = *(u16 *)from;
			return 0;
		case 4:
			*(u32 __force *)to = *(u32 *)from;
			return 0;
#ifdef CONFIG_64BIT
		case 8:
			*(u64 __force *)to = *(u64 *)from;
			return 0;
#endif
		default:
			break;
		}
	}

	memcpy((void __force *)to, from, n);
	return 0;
}
#endif
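
/*
 * Illustrative sketch (hypothetical, not part of this header): an MMU
 * architecture overrides the two helpers above by providing its own
 * definitions before this file is included, typically from its
 * asm/uaccess.h:
 *
 *	#define __copy_from_user	arch_copy_from_user
 *	#define __copy_to_user		arch_copy_to_user
 *	#include <asm-generic/uaccess.h>
 *
 * The arch_* names are made up for illustration; real implementations
 * use exception-table fixups and return the number of bytes that could
 * not be copied.
 */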

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 * This version just falls back to copy_{from,to}_user, which should
 * provide a fast-path for small values.  A usage sketch follows the
 * definitions below.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)					\
({								\
	void *__p = (ptr);					\
	might_fault();						\
	access_ok(VERIFY_WRITE, __p, sizeof(*(ptr))) ?		\
		__put_user((x), ((__typeof__(*(ptr)) *)__p)) :	\
		-EFAULT;					\
})

#ifndef __put_user_fn

static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
	size = __copy_to_user(ptr, x, size);
	return size ? -EFAULT : size;
}

#define __put_user_fn(sz, u, k)	__put_user_fn(sz, u, k)

#endif

extern int __put_user_bad(void) __attribute__((noreturn));

#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x;				\
		__gu_err = __get_user_fn(sizeof(*(ptr)),	\
					 ptr, &__x);		\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__gu_err;						\
})

#define get_user(x, ptr)					\
({								\
	const void *__p = (ptr);				\
	might_fault();						\
	access_ok(VERIFY_READ, __p, sizeof(*(ptr))) ?		\
		__get_user((x), (__typeof__(*(ptr)) *)__p) :	\
		((x) = (__typeof__(*(ptr)))0, -EFAULT);		\
})

#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
	size_t n = __copy_from_user(x, ptr, size);

	if (unlikely(n)) {
		memset(x + (size - n), 0, n);
		return -EFAULT;
	}
	return 0;
}

#define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)

#endif

extern int __get_user_bad(void) __attribute__((noreturn));
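
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * header): get_user()/put_user() transfer a single scalar and return 0
 * on success or -EFAULT on failure, so a syscall or ioctl handler
 * typically does:
 *
 *	int __user *uptr = ...;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	val++;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */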

#ifndef __copy_from_user_inatomic
#define __copy_from_user_inatomic __copy_from_user
#endif

#ifndef __copy_to_user_inatomic
#define __copy_to_user_inatomic __copy_to_user
#endif

static inline long copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = __copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

static inline long copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_to_user(to, from, n);
	else
		return n;
}
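
/*
 * Illustrative usage sketch (hypothetical caller; my_args and uarg are
 * made-up names, not part of this header): both routines return the
 * number of bytes that could NOT be copied, so 0 means success:
 *
 *	struct my_args args;
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 *	... operate on args ...
 *	if (copy_to_user(uarg, &args, sizeof(args)))
 *		return -EFAULT;
 */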

/*
 * Copy a null-terminated string from userspace.
 */
#ifndef __strncpy_from_user
static inline long
__strncpy_from_user(char *dst, const char __user *src, long count)
{
	char *tmp;

	strncpy(dst, (const char __force *)src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return (tmp - dst);
}
#endif

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}
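
/*
 * Illustrative usage sketch (hypothetical caller and buffer, not part
 * of this header): strncpy_from_user() returns the number of copied
 * characters (not counting the terminator) or -EFAULT:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *
 * If len equals sizeof(name) the string was truncated and may not be
 * NUL-terminated, so callers usually treat that case as an error too.
 */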

/*
 * Return the size of a string (including the terminating NUL).
 *
 * Returns 0 on exception, or a value greater than N if the string is
 * too long.
 */
#ifndef __strnlen_user
#define __strnlen_user(s, n) (strnlen((s), (n)) + 1)
#endif

/*
 * Unlike strnlen, strnlen_user includes the NUL terminator in
 * its returned count.  Callers should check for a returned value
 * greater than N as an indication the string is too long.
 */
static inline long strnlen_user(const char __user *src, long n)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return 0;
	return __strnlen_user(src, n);
}

static inline long strlen_user(const char __user *src)
{
	return strnlen_user(src, 32767);
}
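
/*
 * Illustrative usage sketch (hypothetical caller; buf and ustr are
 * made-up names, not part of this header): because the returned count
 * includes the terminator, a bounds check looks like:
 *
 *	long len = strnlen_user(ustr, sizeof(buf));
 *
 *	if (len == 0)
 *		return -EFAULT;
 *	if (len > sizeof(buf))
 *		return -ENAMETOOLONG;
 */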

/*
 * Zero Userspace
 */
#ifndef __clear_user
static inline __must_check unsigned long
__clear_user(void __user *to, unsigned long n)
{
	memset((void __force *)to, 0, n);
	return 0;
}
#endif

static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;

	return __clear_user(to, n);
}
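
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * header): clear_user() returns the number of bytes that could not be
 * zeroed, so 0 means the whole range was cleared:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */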

#endif /* __ASM_GENERIC_UACCESS_H */