/*
 * arch/s390/lib/uaccess_pt.c
 *
 * User access functions based on page table walks for enhanced
 * system layout without hardware support.
 *
 * Copyright IBM Corp. 2006, 2012
 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

/*
 * Returns the kernel address for a user virtual address. If the returned
 * address is >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred
 * and the address contains the (negative) exception code.
 */
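/*
 * The exception codes returned below are assumed to mirror the s390 DAT
 * program-interruption codes: 0x04 protection, 0x10 segment translation,
 * 0x11 page translation, 0x3a/0x3b region-second/region-third translation.
 */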
static __always_inline unsigned long follow_table(struct mm_struct *mm,
						  unsigned long addr, int write)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return -0x3aUL;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return -0x3bUL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return -0x10UL;
	if (pmd_large(*pmd)) {
		if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
			return -0x04UL;
		return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
	}
	if (unlikely(pmd_bad(*pmd)))
		return -0x10UL;

	ptep = pte_offset_map(pmd, addr);
	if (!pte_present(*ptep))
		return -0x11UL;
	if (write && !pte_write(*ptep))
		return -0x04UL;

	return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
}

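/*
 * Copy data between kernel and user space by walking the page tables
 * under mm->page_table_lock. On a translation fault the lock is dropped,
 * __handle_fault() resolves the fault, and the copy is retried.
 * Returns the number of bytes that could not be copied.
 */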
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, size, kaddr;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, write_user);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *) kaddr;
			from = kptr + done;
		} else {
			from = (void *) kaddr;
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, write_user))
		return n - done;
	goto retry;
}

/*
 * Do DAT for a user address by page table walk and return the kernel
 * address. Must be called with current->mm->page_table_lock held; the
 * lock is dropped and reacquired while a fault is handled. Returns 0
 * if the fault could not be resolved.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
						     int write)
{
	struct mm_struct *mm = current->mm;
	unsigned long kaddr;
	int rc;

retry:
	kaddr = follow_table(mm, uaddr, write);
	if (IS_ERR_VALUE(kaddr))
		goto fault;

	return kaddr;
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, -kaddr, write);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	return 0;
}

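/*
 * copy_from_user replacement: on a partial copy the uncopied tail of
 * the kernel buffer is zeroed, matching copy_from_user() semantics.
 */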
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}

size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}

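/*
 * Clear user memory by copying zeroes from empty_zero_page, at most
 * one page per iteration.
 */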
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				     &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}

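/*
 * strnlen_user replacement: returns the string length including the
 * terminating NUL (count + 1 if no NUL was found within count bytes),
 * or 0 on an unresolvable fault.
 */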
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, len, kaddr;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, 0);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen((char *) kaddr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return done + 1;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, 0))
		return 0;
	goto retry;
}

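/*
 * strncpy_from_user replacement: returns the length of the copied
 * string without the terminating NUL, count if the string was
 * truncated, or -EFAULT on fault.
 */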
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}

static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_max, uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	unsigned long kaddr_to, kaddr_from;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __force *) to, (void __force *) from, n);
		return 0;
	}
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		write_user = 0;
		uaddr = uaddr_from;
		kaddr_from = follow_table(mm, uaddr_from, 0);
		error_code = kaddr_from;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		write_user = 1;
		uaddr = uaddr_to;
		kaddr_to = follow_table(mm, uaddr_to, 1);
		error_code = kaddr_to;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		offset_max = max(uaddr_from & ~PAGE_MASK,
				 uaddr_to & ~PAGE_MASK);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -error_code, write_user))
		return n - done;
	goto retry;
}

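/*
 * Load the old value, apply the operation given by "insn", and retry
 * with compare-and-swap until the update succeeds atomically. A fault
 * on any of the marked instructions branches to label 4 via the
 * exception table, leaving ret at its initial value of -EFAULT.
 */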
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l %1,0(%6)\n"					\
		     "1: " insn						\
		     "2: cs %1,%2,0(%6)\n"				\
		     "3: jl 1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc");

static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		/* FUTEX_OP_ANDN computes old & ~oparg, so complement oparg */
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, ~oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}

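/*
 * Translate the user address under mm->page_table_lock, pin the
 * backing page so it cannot be freed once the lock is dropped, then
 * run the atomic operation on the kernel mapping.
 */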
int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}

static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
				     u32 oldval, u32 newval)
{
	int ret;

	asm volatile("0: cs %1,%4,0(%5)\n"
		     "1: la %0,0\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory");
	*uval = oldval;
	return ret;
}

int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
			    u32 oldval, u32 newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}

struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};
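
/*
 * A minimal usage sketch, assuming the generic s390 uaccess wrappers
 * dispatch through a global ops structure (the name "uaccess" and the
 * exact dispatch are assumptions here, not taken from this file):
 *
 *	struct uaccess_ops uaccess = uaccess_pt;
 *
 *	static inline size_t copy_from_user(void *to,
 *					    const void __user *from, size_t n)
 *	{
 *		return uaccess.copy_from_user(n, from, to);
 *	}
 */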