sched/preempt, mm/fault: Trigger might_sleep() in might_fault() with disabled pagefaults
arch/x86/include/asm/uaccess_32.h
#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space pages are pinned
 * (or otherwise guaranteed resident) so that the copy cannot take a
 * page fault and sleep.
 *
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations. On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the x86 will not write
 * anything, so this is accurate.
 */

static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to,
					1, ret, 1);
			return ret;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to,
					2, ret, 2);
			return ret;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to,
					4, ret, 4);
			return ret;
		}
	}
	return __copy_to_user_ll(to, from, n);
}
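
/*
 * Usage sketch (not part of this header; caller code is hypothetical):
 * the destination must already have been checked with access_ok() and
 * made resident, since no fault may be taken here. A non-zero return
 * means a short copy that the caller must handle itself.
 *
 *	pagefault_disable();
 *	left = __copy_to_user_inatomic(dst, &val, sizeof(val));
 *	pagefault_enable();
 *	if (left)
 *		... retry from a sleepable context ...
 */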

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
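
/*
 * The might_fault() above is what the commit subject refers to: it
 * now triggers might_sleep() even with pagefaults disabled, so
 * __copy_to_user() must only be used where sleeping is legal. A
 * hedged sketch of the resulting rule ('in_atomic_copy' stands for
 * any pagefault-disabled region and is illustrative):
 *
 *	if (in_atomic_copy)
 *		left = __copy_to_user_inatomic(dst, buf, len);
 *	else
 *		left = __copy_to_user(dst, buf, len);	// may fault and sleep
 */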

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/* Avoid zeroing the tail if the copy fails.
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 */
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nozero(to, from, n);
}
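
/*
 * A sketch of the fast-path/slow-path pattern this variant exists for
 * (compare the __copy_from_user() comment below and fs/filemap.h);
 * variable names are illustrative:
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(kaddr, ubuf, bytes);
 *	pagefault_enable();
 *	if (left)
 *		left = __copy_from_user(kaddr, ubuf, bytes); // sleepable retry
 */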

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep. In this case the
 * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
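
/*
 * Caller sketch ('cfg' and 'ubuf' are hypothetical): because of the
 * zero-padding described above, a failed copy never leaves
 * uninitialized kernel memory behind, so the error path stays simple:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, sizeof(cfg)))
 *		return -EFAULT;
 *	if (__copy_from_user(&cfg, ubuf, sizeof(cfg)))
 *		return -EFAULT;	// short copy; tail of cfg was zeroed
 */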

static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}
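
/*
 * The _nocache variants fall through to __copy_from_user_ll_nocache(),
 * which can use non-temporal stores where the CPU supports them, so a
 * large read-once user buffer does not evict warm cache lines. Sketch
 * (destination name illustrative):
 *
 *	if (__copy_from_user_nocache(staging_buf, ubuf, len))
 *		return -EFAULT;
 */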

static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}

#endif /* _ASM_X86_UACCESS_32_H */