arch/x86/include/asm/uaccess_32.h
#ifndef _ASM_X86_UACCESS_32_H
#define _ASM_X86_UACCESS_32_H

/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>

unsigned long __must_check __copy_to_user_ll
		(void __user *to, const void *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nozero
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache
		(void *to, const void __user *from, unsigned long n);
unsigned long __must_check __copy_from_user_ll_nocache_nozero
		(void *to, const void __user *from, unsigned long n);

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space pages are pinned,
 * so that we do not take a page fault and sleep.
 *
 * Here we special-case 1, 2, 4 and 8-byte copy_*_user invocations. On a
 * fault we return the initial request size (1, 2, 4 or 8), as copy_*_user
 * should do. If a store crosses a page boundary and faults, the x86 will
 * not have written anything, so this is accurate.
 */

static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to,
					1, ret, 1);
			return ret;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to,
					2, ret, 2);
			return ret;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to,
					4, ret, 4);
			return ret;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to,
					8, ret, 8);
			return ret;
		}
	}
	return __copy_to_user_ll(to, from, n);
}
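
/*
 * Illustrative sketch only, not part of the original header: a caller that
 * must not sleep typically brackets the copy with pagefault_disable() and
 * pagefault_enable() from <linux/uaccess.h> and falls back to a path that
 * may fault if bytes remain uncopied. 'uptr' and 'val' are hypothetical
 * names.
 *
 *	u32 val = 42;
 *	unsigned long left;
 *
 *	pagefault_disable();
 *	left = __copy_to_user_inatomic(uptr, &val, sizeof(val));
 *	pagefault_enable();
 *	if (left)
 *		return -EFAULT;
 */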

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}
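
/*
 * Usage sketch, illustrative only and with hypothetical names ('ubuf',
 * 'kbuf'): the caller validates the destination range with access_ok()
 * first and treats any non-zero return value as a failed copy.
 *
 *	struct foo kbuf = { .field = 1 };
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, &kbuf, sizeof(kbuf)))
 *		return -EFAULT;
 */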

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	/*
	 * Avoid zeroing the tail if the copy fails.
	 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
	 * but as the zeroing behaviour is only significant when n is not
	 * constant, that shouldn't be a problem.
	 */
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nozero(to, from, n);
}
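
/*
 * Illustrative sketch with hypothetical names ('uptr', 'kval'): because the
 * tail is not zeroed on failure, the destination must be treated as
 * undefined whenever a non-zero byte count comes back, as below where the
 * caller falls back to a sleeping slow path.
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(&kval, uptr, sizeof(kval));
 *	pagefault_enable();
 *	if (left)
 *		goto slow_path;
 */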

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to: Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - may be called from
 * atomic context and will fail rather than sleep. In this case the
 * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h
 * for explanation of why this is needed.
 */
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll(to, from, n);
}
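
/*
 * Usage sketch, illustrative only and with hypothetical names ('ubuf',
 * 'kbuf'): a sleeping-context read from user space. On a partial copy the
 * destination has been zero-padded, but the usual response is still to
 * return -EFAULT.
 *
 *	struct foo kbuf;
 *
 *	if (!access_ok(VERIFY_READ, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *	if (__copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */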

static __always_inline unsigned long __copy_from_user_nocache(void *to,
				const void __user *from, unsigned long n)
{
	might_fault();
	if (__builtin_constant_p(n)) {
		unsigned long ret;

		switch (n) {
		case 1:
			__get_user_size(*(u8 *)to, from, 1, ret, 1);
			return ret;
		case 2:
			__get_user_size(*(u16 *)to, from, 2, ret, 2);
			return ret;
		case 4:
			__get_user_size(*(u32 *)to, from, 4, ret, 4);
			return ret;
		}
	}
	return __copy_from_user_ll_nocache(to, from, n);
}

static __always_inline unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_ll_nocache_nozero(to, from, n);
}

#endif /* _ASM_X86_UACCESS_32_H */