#ifndef __ASM_SH64_UACCESS_H
#define __ASM_SH64_UACCESS_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/uaccess.h
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003, 2004 Paul Mundt
 *
 * User space memory access functions
 *
 * Copyright (C) 1999 Niibe Yutaka
 *
 * Based on:
 *	MIPS implementation version 1.15 by
 *	Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *	and i386 version.
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(0x80000000)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr) ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 *
 *	sum := addr + size;  carry? --> flag = true;
 *	if (sum >= addr_limit) flag = true;
 */
#define __range_ok(addr,size) (((unsigned long) (addr) + (size) < (current_thread_info()->addr_limit.seg)) ? 0 : 1)

#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
#define __access_ok(addr,size)		(__range_ok(addr,size) == 0)
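
/*
 * Illustrative sketch, not part of the original header: access_ok()
 * evaluates to non-zero when the range [addr, addr + size) lies below the
 * current addr_limit. A caller validating a hypothetical user buffer
 * "ubuf" of "len" bytes before doing raw (unchecked) accesses would do
 * something like:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */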

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As MIPS uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr)	__get_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
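
/*
 * Illustrative sketch, not part of the original header: put_user()/get_user()
 * include the access_ok() check themselves and return 0 on success or
 * -EFAULT on failure, while the "__xxx" forms assume the caller already did
 * the check. The function and variable names below are hypothetical.
 *
 *	static int example_copy_flag(int *udst, const int *usrc)
 *	{
 *		int val;
 *
 *		if (get_user(val, usrc))
 *			return -EFAULT;
 *		return put_user(val, udst);
 *	}
 */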

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens. These macros can be optimized for
 * the case of just returning from the function in which xxx_ret is used.
 */

#define put_user_ret(x,ptr,ret) ({ \
	if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ \
	if (get_user(x,ptr)) return ret; })

#define __put_user_ret(x,ptr,ret) ({ \
	if (__put_user(x,ptr)) return ret; })

#define __get_user_ret(x,ptr,ret) ({ \
	if (__get_user(x,ptr)) return ret; })
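
/*
 * Illustrative sketch, not part of the original header: the *_ret forms
 * expand to a "return ret;" in the enclosing function when the access
 * fails, so they can only appear directly inside a function body. A
 * hypothetical user of put_user_ret():
 *
 *	static int example_store_status(int *ustatus, int status)
 *	{
 *		put_user_ret(status, ustatus, -EFAULT);
 *		return 0;
 *	}
 */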

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: \
		retval = __get_user_asm_b(x, ptr); \
		break; \
	case 2: \
		retval = __get_user_asm_w(x, ptr); \
		break; \
	case 4: \
		retval = __get_user_asm_l(x, ptr); \
		break; \
	case 8: \
		retval = __get_user_asm_q(x, ptr); \
		break; \
	default: \
		__get_user_unknown(); \
		break; \
	} \
} while (0)

#define __get_user_nocheck(x,ptr,size) \
({ \
	long __gu_addr = (long)(ptr); \
	long __gu_err; \
	__typeof(*(ptr)) __gu_val; \
	__asm__ ("":"=r" (__gu_val)); \
	__asm__ ("":"=r" (__gu_err)); \
	__get_user_size((void *)&__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})

#define __get_user_check(x,ptr,size) \
({ \
	long __gu_addr = (long)(ptr); \
	long __gu_err = -EFAULT; \
	__typeof(*(ptr)) __gu_val; \
	__asm__ ("":"=r" (__gu_val)); \
	__asm__ ("":"=r" (__gu_err)); \
	if (__access_ok(__gu_addr, (size))) \
		__get_user_size((void *)&__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})

extern long __get_user_asm_b(void *, long);
extern long __get_user_asm_w(void *, long);
extern long __get_user_asm_l(void *, long);
extern long __get_user_asm_q(void *, long);
extern void __get_user_unknown(void);
#define __put_user_size(x,ptr,size,retval) \
do { \
	retval = 0; \
	switch (size) { \
	case 1: \
		retval = __put_user_asm_b(x, ptr); \
		break; \
	case 2: \
		retval = __put_user_asm_w(x, ptr); \
		break; \
	case 4: \
		retval = __put_user_asm_l(x, ptr); \
		break; \
	case 8: \
		retval = __put_user_asm_q(x, ptr); \
		break; \
	default: \
		__put_user_unknown(); \
	} \
} while (0)

#define __put_user_nocheck(x,ptr,size) \
({ \
	long __pu_err; \
	__typeof__(*(ptr)) __pu_val = (x); \
	__put_user_size((void *)&__pu_val, (long)(ptr), (size), __pu_err); \
	__pu_err; \
})

#define __put_user_check(x,ptr,size) \
({ \
	long __pu_err = -EFAULT; \
	long __pu_addr = (long)(ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	\
	if (__access_ok(__pu_addr, (size))) \
		__put_user_size((void *)&__pu_val, __pu_addr, (size), __pu_err); \
	__pu_err; \
})

extern long __put_user_asm_b(void *, long);
extern long __put_user_asm_w(void *, long);
extern long __put_user_asm_l(void *, long);
extern long __put_user_asm_q(void *, long);
extern void __put_user_unknown(void);

/* Generic arbitrary sized copy. */
/* Return the number of bytes NOT copied */
/* XXX: should be such that: 4byte and the rest. */
extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n);

#define copy_to_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })

#define copy_to_user_ret(to,from,n,retval) ({ \
	if (copy_to_user(to,from,n)) \
		return retval; \
})

#define __copy_to_user(to,from,n) \
	__copy_user((void *)(to), (void *)(from), n)

#define __copy_to_user_ret(to,from,n,retval) ({ \
	if (__copy_to_user(to,from,n)) \
		return retval; \
})

#define copy_from_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	void *__copy_from = (void *) (from); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })

#define copy_from_user_ret(to,from,n,retval) ({ \
	if (copy_from_user(to,from,n)) \
		return retval; \
})

#define __copy_from_user(to,from,n) \
	__copy_user((void *)(to), (void *)(from), n)

#define __copy_from_user_ret(to,from,n,retval) ({ \
	if (__copy_from_user(to,from,n)) \
		return retval; \
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user
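
/*
 * Illustrative sketch, not part of the original header: copy_to_user() and
 * copy_from_user() return the number of bytes that could NOT be copied, so
 * zero means success. With a hypothetical user pointer "uarg":
 *
 *	struct example_args karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */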

/* XXX: Not sure it works well..
   should be such that: 4byte clear and the rest. */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n) ({ \
	void *__cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })

extern int __strncpy_from_user(unsigned long __dest, unsigned long __src, int __count);

#define strncpy_from_user(dest,src,count) ({ \
	unsigned long __sfu_src = (unsigned long) (src); \
	int __sfu_count = (int) (count); \
	long __sfu_res = -EFAULT; \
	if (__access_ok(__sfu_src, __sfu_count)) { \
		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
	} __sfu_res; })
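
/*
 * Illustrative sketch, not part of the original header: strncpy_from_user()
 * yields -EFAULT when the source range fails __access_ok(), and otherwise
 * whatever __strncpy_from_user() returns (conventionally the number of
 * characters copied, not counting the terminating NUL). With a hypothetical
 * user pointer "uname":
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 */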

#define strlen_user(str) strnlen_user(str, ~0UL >> 1)

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *__s, long __n);

static inline long strnlen_user(const char *s, long n)
{
	if (!__addr_ok(s))
		return 0;
	else
		return __strnlen_user(s, n);
}

struct exception_table_entry
{
	unsigned long insn, fixup;
};

#define ARCH_HAS_SEARCH_EXTABLE

/* If gcc inlines memset, it will use st.q instructions. Therefore, we need
   kmalloc allocations to be 8-byte aligned. Without this, the alignment
   becomes BYTES_PER_WORD i.e. only 4 (since sizeof(long)==sizeof(void*)==4 on
   sh64 at the moment). */
#define ARCH_KMALLOC_MINALIGN 8

/*
 * We want 8-byte alignment for the slab caches as well, otherwise we have
 * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
 */
#define ARCH_SLAB_MINALIGN 8

/* Returns 0 if exception not found and fixup.unit otherwise. */
extern unsigned long search_exception_table(unsigned long addr);
extern const struct exception_table_entry *search_exception_tables(unsigned long addr);

#endif /* __ASM_SH64_UACCESS_H */