ARM: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW on pre-ARMv6 CPUs
author	Catalin Marinas <catalin.marinas@arm.com>
Mon, 28 Nov 2011 21:57:24 +0000 (21:57 +0000)
committer	Catalin Marinas <catalin.marinas@arm.com>
Tue, 17 Apr 2012 14:29:44 +0000 (15:29 +0100)
This patch removes the __ARCH_WANT_INTERRUPTS_ON_CTXSW definition for
ARMv5 and earlier processors. On such processors the context switch
requires a full cache flush, so, to avoid high interrupt latencies,
this patch defers the mm switching to the post-lock switch hook
(finish_arch_post_lock_switch()) if interrupts are disabled.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Tested-by: Marc Zyngier <Marc.Zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm/include/asm/mmu.h
arch/arm/include/asm/mmu_context.h

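For context (not part of this patch): the deferral relies on the generic
scheduler invoking finish_arch_post_lock_switch() once the runqueue lock
has been dropped and interrupts are enabled again. A condensed sketch of
that hook wiring, based on the companion scheduler patch in this series;
the exact call site is an assumption:

    /* kernel/sched/sched.h: arch override used only if defined (assumed form) */
    #ifndef finish_arch_post_lock_switch
    # define finish_arch_post_lock_switch()	do { } while (0)
    #endif

    /* kernel/sched/core.c: finish_task_switch(), condensed */
    finish_lock_switch(rq, prev);	/* releases rq->lock, IRQs on again */
    finish_arch_post_lock_switch();	/* deferred cpu_switch_mm() runs here */
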
index 20b43d6f23b3a92c5a358241dd13281623fe2e88..14965658a923c5e99c009292ddc99f06d013738a 100644 (file)
@@ -34,13 +34,4 @@ typedef struct {
 
 #endif
 
-/*
- * switch_mm() may do a full cache flush over the context switch,
- * so enable interrupts over the context switch to avoid high
- * latency.
- */
-#ifndef CONFIG_CPU_HAS_ASID
-#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
-#endif
-
 #endif
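
Why interrupts mattered here: on pre-ARMv6 VIVT processors,
cpu_switch_mm() resolves to per-CPU code that cleans and invalidates the
whole cache before loading the new page table pointer. As a reminder of
what the call expands to (single-CPU-type build; the exact macro form is
an assumption from proc-fns.h):

    /* arch/arm/include/asm/proc-fns.h, condensed (assumed form) */
    #define cpu_switch_mm(pgd, mm)	cpu_do_switch_mm(virt_to_phys(pgd), mm)
    /* cpu_do_switch_mm() on VIVT cores flushes the whole D-cache first */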
index 8da4b9c042fe3431b507c4e723c4f11475ab53bb..0306bc642c0d4fe7abd0368fd51477935e496c05 100644 (file)
@@ -105,19 +105,40 @@ static inline void finish_arch_post_lock_switch(void)
 
 #else  /* !CONFIG_CPU_HAS_ASID */
 
+#ifdef CONFIG_MMU
+
 static inline void check_and_switch_context(struct mm_struct *mm,
                                            struct task_struct *tsk)
 {
-#ifdef CONFIG_MMU
        if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
                __check_kvm_seq(mm);
-       cpu_switch_mm(mm->pgd, mm);
-#endif
+
+       if (irqs_disabled())
+               /*
+                * cpu_switch_mm() needs to flush the VIVT caches. To avoid
+                * high interrupt latencies, defer the call and continue
+                * running with the old mm. Since we only support UP systems
+                * on non-ASID CPUs, the old mm will remain valid until the
+                * finish_arch_post_lock_switch() call.
+                */
+               set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+       else
+               cpu_switch_mm(mm->pgd, mm);
 }
 
-#define init_new_context(tsk,mm)       0
+#define finish_arch_post_lock_switch \
+       finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+       if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
+               struct mm_struct *mm = current->mm;
+               cpu_switch_mm(mm->pgd, mm);
+       }
+}
 
-#define finish_arch_post_lock_switch() do { } while (0)
+#endif /* CONFIG_MMU */
+
+#define init_new_context(tsk,mm)       0
 
 #endif /* CONFIG_CPU_HAS_ASID */
 