#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

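/*
 * Three implementations of the raw copy loop, selected once at boot
 * (see copy_user_generic() below):
 *
 *  - copy_user_generic_unrolled: open-coded, unrolled move loop; the
 *    baseline for CPUs with slow REP string instructions.
 *  - copy_user_generic_string: REP MOVS based; used when the CPU has
 *    X86_FEATURE_REP_GOOD.
 *  - copy_user_enhanced_fast_string: REP MOVSB based; used when the
 *    CPU has Enhanced REP MOVSB/STOSB (X86_FEATURE_ERMS).
 *
 * All three return the number of bytes NOT copied, i.e. 0 on success.
 */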
/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

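/*
 * Dispatch to the routine chosen for this CPU.  The alternatives
 * mechanism patches the call target once at boot, so no feature flag
 * is tested at run time.  Because the call is issued from inline asm,
 * the explicit constraints and clobbers below describe the C calling
 * convention (args in rdi/rsi/rdx, result in rax, rcx and r8-r11
 * caller-clobbered) to the compiler.
 */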
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

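/* Fault-safe copy between two user buffers; does not do access_ok. */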
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

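/*
 * __copy_from_user_nocheck - raw copy from user space, no access_ok
 *
 * Constant sizes up to 16 bytes are open-coded with __get_user_asm()
 * so small fixed-size copies compile down to one or two fault-safe
 * moves; everything else falls back to copy_user_generic().  The last
 * argument of each __get_user_asm() is the byte count reported back
 * as uncopied if that particular access faults.
 */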
static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}

| 93 | |
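/*
 * __copy_from_user - copy data from user space, caller-checked variant
 *
 * The caller is responsible for the access_ok() range check; this
 * wrapper only adds the might_fault() sleep/lockdep annotation.
 * Illustrative use only (hypothetical 'uptr' and 'struct foo'):
 *
 *	struct foo kbuf;
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(kbuf)))
 *		return -EFAULT;
 *	if (__copy_from_user(&kbuf, uptr, sizeof(kbuf)))
 *		return -EFAULT;
 */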
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_from_user_nocheck(dst, src, size);
}

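/*
 * __copy_to_user_nocheck - raw copy to user space, no access_ok
 *
 * Mirror image of __copy_from_user_nocheck(): constant sizes up to 16
 * bytes are open-coded with __put_user_asm(), everything else goes
 * through copy_user_generic().
 */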
static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "er", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (unlikely(ret))
			return ret;
		/* compiler barrier between the two user stores */
		asm("":::"memory");
		__put_user_asm(*(u16 *)(8 + (char *)src),
			       (u16 __user *)(8 + (char __user *)dst),
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (unlikely(ret))
			return ret;
		/* compiler barrier between the two user stores */
		asm("":::"memory");
		__put_user_asm(*(u64 *)(8 + (char *)src),
			       (u64 __user *)(8 + (char __user *)dst),
			       ret, "q", "", "er", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

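/*
 * __copy_to_user - copy data to user space, caller-checked variant
 *
 * As with __copy_from_user(), the caller must already have done the
 * access_ok() check (with VERIFY_WRITE) on dst.
 */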
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	return __copy_to_user_nocheck(dst, src, size);
}

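/*
 * __copy_in_user - copy data between two user buffers
 *
 * Constant sizes up to 8 bytes bounce through a kernel temporary using
 * a fault-safe load/store pair; anything else hands both (__force
 * cast) user pointers straight to copy_user_generic(), whose exception
 * handling covers faults on either side.
 */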
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}

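/*
 * The _inatomic variants omit might_fault(): they are intended for
 * contexts that cannot sleep (e.g. under pagefault_disable()), where a
 * fault simply shows up as a non-zero count of uncopied bytes.
 */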
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return __copy_to_user_nocheck(dst, src, size);
}

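/*
 * Non-temporal copy from user space.  __copy_user_nocache() uses
 * non-temporal stores so that a large copy does not evict useful data
 * from the CPU caches; 'zerorest' selects whether the remaining
 * destination bytes are zeroed after a fault.
 */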
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}

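/*
 * copy_user_handle_tail - fixup helper for a faulting bulk copy
 *
 * Called from the assembly fault-handling paths to retry the remaining
 * bytes one at a time; returns the number of bytes that still could
 * not be copied.
 */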
unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len);

#endif /* _ASM_X86_UACCESS_64_H */