arm64: compat: align cacheflush syscall with arch/arm
author    Vladimir Murzin <vladimir.murzin@arm.com>    Mon, 1 Dec 2014 10:53:08 +0000 (10:53 +0000)
committer Will Deacon <will.deacon@arm.com>            Mon, 1 Dec 2014 13:31:12 +0000 (13:31 +0000)
Update the handling of the cacheflush syscall to match the changes
made in its arch/arm counterpart:
 - return the error to userspace when the flush fails (see the usage
   sketch below)
 - split user cache-flushing into interruptible chunks
 - don't bother rounding to the nearest vma
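
For reference, a 32-bit task reaches this path via the ARM-private
syscall number (__ARM_NR_cacheflush = __ARM_NR_BASE + 2 = 0x0f0002 in
the arch/arm uapi headers); there is no generic libc wrapper. A
minimal userspace sketch, not part of this patch (the wrapper name is
illustrative):

  #include <unistd.h>
  #include <sys/syscall.h>

  #define __ARM_NR_cacheflush	0x0f0002  /* ARM-private: __ARM_NR_BASE + 2 */

  /*
   * Make [start, end) coherent between the D-cache and I-cache,
   * e.g. after writing out JIT-compiled code. The third argument
   * (flags) must be zero or the kernel rejects the call.
   */
  static int cacheflush(unsigned long start, unsigned long end)
  {
  	return syscall(__ARM_NR_cacheflush, start, end, 0);
  }

With this patch the return value becomes meaningful: 0 on success, or
-1 with errno set to EINVAL or EFAULT on failure.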

Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
[will: changed internal return value from -EINTR to 0 to match arch/arm/]
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm64/include/asm/cacheflush.h
arch/arm64/kernel/sys_compat.c
arch/arm64/mm/cache.S

diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 689b6379188c112ac2441833e165dcb63b0b4515..7ae31a2cc6c0ba97780200bef8a0fe96c7a3127e 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -73,7 +73,7 @@ extern void flush_cache_all(void);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
-extern void __flush_cache_user_range(unsigned long start, unsigned long end);
+extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index dc47e53e9e28c15da99e62976a9ca29f71da8bc4..28c511b06edfc05d9ac95e8c4ddf7bf11d62df05 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
 #include <asm/cacheflush.h>
 #include <asm/unistd.h>
 
-static inline void
-do_compat_cache_op(unsigned long start, unsigned long end, int flags)
+static long
+__do_compat_cache_op(unsigned long start, unsigned long end)
 {
-       struct mm_struct *mm = current->active_mm;
-       struct vm_area_struct *vma;
+       long ret;
 
-       if (end < start || flags)
-               return;
+       do {
+               unsigned long chunk = min(PAGE_SIZE, end - start);
 
-       down_read(&mm->mmap_sem);
-       vma = find_vma(mm, start);
-       if (vma && vma->vm_start < end) {
-               if (start < vma->vm_start)
-                       start = vma->vm_start;
-               if (end > vma->vm_end)
-                       end = vma->vm_end;
-               up_read(&mm->mmap_sem);
-               __flush_cache_user_range(start & PAGE_MASK, PAGE_ALIGN(end));
-               return;
-       }
-       up_read(&mm->mmap_sem);
+               if (fatal_signal_pending(current))
+                       return 0;
+
+               ret = __flush_cache_user_range(start, start + chunk);
+               if (ret)
+                       return ret;
+
+               cond_resched();
+               start += chunk;
+       } while (start < end);
+
+       return 0;
 }
 
+static inline long
+do_compat_cache_op(unsigned long start, unsigned long end, int flags)
+{
+       if (end < start || flags)
+               return -EINVAL;
+
+       if (!access_ok(VERIFY_READ, start, end - start))
+               return -EFAULT;
+
+       return __do_compat_cache_op(start, end);
+}
 /*
  * Handle all unrecognised system calls.
  */
@@ -74,8 +84,7 @@ long compat_arm_syscall(struct pt_regs *regs)
         * the specified region).
         */
        case __ARM_NR_compat_cacheflush:
-               do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
-               return 0;
+               return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
 
        case __ARM_NR_compat_set_tls:
                current->thread.tp_value = regs->regs[0];
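
The new error paths are observable from a compat task. A hypothetical
test sketch (addresses illustrative; assumes nothing is mapped at
address zero):

  #include <errno.h>
  #include <stdio.h>
  #include <unistd.h>
  #include <sys/syscall.h>

  #define __ARM_NR_cacheflush	0x0f0002

  int main(void)
  {
  	/* end < start: do_compat_cache_op() now returns -EINVAL */
  	if (syscall(__ARM_NR_cacheflush, 4096UL, 0UL, 0) < 0)
  		printf("reversed range: errno=%d (EINVAL=%d)\n", errno, EINVAL);

  	/* unmapped page: the flush loop faults and returns -EFAULT */
  	if (syscall(__ARM_NR_cacheflush, 0UL, 4096UL, 0) < 0)
  		printf("unmapped range: errno=%d (EFAULT=%d)\n", errno, EFAULT);

  	return 0;
  }

Note that the fatal-signal check deliberately returns 0 rather than an
error: the task is being killed and never sees the value, matching the
arch/arm behaviour (see the bracketed note in the sign-off above).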
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 8eaf18577d7168b92f2a8f97ff3f29304b4ccb74..2560e1e1562e764f03fe8fa9925466f18653199d 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -17,6 +17,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/errno.h>
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
@@ -140,9 +141,12 @@ USER(9f, ic        ivau, x4        )               // invalidate I line PoU
        add     x4, x4, x2
        cmp     x4, x1
        b.lo    1b
-9:                                             // ignore any faulting cache operation
        dsb     ish
        isb
+       mov     x0, #0
+       ret
+9:
+       mov     x0, #-EFAULT
        ret
 ENDPROC(flush_icache_range)
 ENDPROC(__flush_cache_user_range)
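
The -EFAULT path relies on the USER() macro wrapping the
cache-maintenance instructions: it records each instruction's address
in the kernel exception table so that a fault on a user address
resumes at the fixup label (9:) instead of oopsing. A sketch of the
mechanism, assuming the contemporary definition in
arch/arm64/include/asm/assembler.h:

  #define USER(l, x...)			\
  9999:	x;				\
  	.section __ex_table,"a";	\
  	.align	3;			\
  	.quad	9999b,l;		\
  	.previous

A fault on "ic ivau" (or the "dc cvau" earlier in the same function)
therefore branches to label 9, which with this patch returns -EFAULT
instead of silently ignoring the failed flush.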