x86/paravirt: Remove the unused irq_enable_sysexit pv op
author	Boris Ostrovsky <boris.ostrovsky@oracle.com>
Thu, 19 Nov 2015 21:55:46 +0000 (16:55 -0500)
committer	Ingo Molnar <mingo@kernel.org>
Mon, 23 Nov 2015 09:48:16 +0000 (10:48 +0100)
As a result of the commit "x86/xen: Avoid fast syscall path for Xen PV
guests", the irq_enable_sysexit pv op is no longer called by Xen PV
guests. Since they were its only user, it can be safely removed.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: david.vrabel@citrix.com
Cc: konrad.wilk@oracle.com
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1447970147-1733-3-git-send-email-boris.ostrovsky@oracle.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
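For context on the indirection removed by the change described above, here is a minimal
userspace sketch (illustrative only; the struct, field and function names are hypothetical
stand-ins, not the kernel's actual pv_cpu_ops API): a paravirt op is a function pointer in
an ops table that a guest such as Xen PV may override, and once nothing overrides it the
pointer buys nothing over emitting the native "sti; sysexit" directly at the call site,
which is what the entry_32.S hunk below does.

/*
 * Illustrative userspace sketch only -- not kernel code.
 * A "cpu ops" table carries an exit hook that a guest may override; once
 * nothing overrides it, the indirection can be dropped and the native
 * instructions emitted inline at the call site.
 */
#include <stdio.h>

struct cpu_ops {
	void (*irq_enable_exit)(void);	/* the kind of slot this commit deletes */
};

static void native_exit(void)
{
	puts("sti; sysexit");		/* stand-in for the real instruction pair */
}

static struct cpu_ops ops = {
	.irq_enable_exit = native_exit,	/* only remaining implementation */
};

int main(void)
{
	ops.irq_enable_exit();		/* indirect call through the ops table... */
	native_exit();			/* ...vs. using the native path directly */
	return 0;
}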
arch/x86/entry/entry_32.S
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/paravirt_patch_32.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/xen/enlighten.c
arch/x86/xen/xen-asm_32.S
arch/x86/xen/xen-ops.h

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 0870825a95685bfe916c2fc3523937444a21e948..9870c972d3458b3b1a304ad53bdbf0dc5ba88a26 100644
@@ -329,7 +329,8 @@ sysenter_past_esp:
         * Return back to the vDSO, which will pop ecx and edx.
         * Don't bother with DS and ES (they already contain __USER_DS).
         */
-       ENABLE_INTERRUPTS_SYSEXIT
+       sti
+       sysexit
 
 .pushsection .fixup, "ax"
 2:     movl    $0, PT_FS(%esp)
@@ -552,11 +553,6 @@ ENTRY(native_iret)
        iret
        _ASM_EXTABLE(native_iret, iret_exc)
 END(native_iret)
-
-ENTRY(native_irq_enable_sysexit)
-       sti
-       sysexit
-END(native_irq_enable_sysexit)
 #endif
 
 ENTRY(overflow)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 10d0596433f89b849f91c164d7252a29bcac921b..c28518e163f59911ee7ee908157ba72c6fb2e95b 100644
@@ -932,13 +932,6 @@ extern void default_banner(void);
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx
-
-#define ENABLE_INTERRUPTS_SYSEXIT                                      \
-       PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
-                 CLBR_NONE,                                            \
-                 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
-
-
 #else  /* !CONFIG_X86_32 */
 
 /*
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 31247b5bff7c8ff86d893851dc9073b72a647cc2..608bbf361c50315ee4781d3ac51137e42d7c26e8 100644
@@ -157,15 +157,6 @@ struct pv_cpu_ops {
 
        u64 (*read_pmc)(int counter);
 
-#ifdef CONFIG_X86_32
-       /*
-        * Atomically enable interrupts and return to userspace.  This
-        * is only used in 32-bit kernels.  64-bit kernels use
-        * usergs_sysret32 instead.
-        */
-       void (*irq_enable_sysexit)(void);
-#endif
-
        /*
         * Switch to usermode gs and return to 64-bit usermode using
         * sysret.  Only used in 64-bit kernels to return to 64-bit
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 439df975bc7aed0861581381476eeb933bc086de..84a7524b202cad58509a68db5e6d229f7765b6aa 100644
@@ -65,9 +65,6 @@ void common(void) {
        OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
        OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
        OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
-#ifdef CONFIG_X86_32
-       OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
-#endif
        OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
        OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
 #endif
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c2130aef3f9d25d6c6a47f3499b8096c5cfb8e80..c55f4371a43d2392158a0549b75bc120dec37c40 100644
@@ -162,9 +162,6 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                ret = paravirt_patch_ident_64(insnbuf, len);
 
        else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-#ifdef CONFIG_X86_32
-                type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
-#endif
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
                /* If operation requires a jmp, then jmp */
@@ -220,7 +217,6 @@ static u64 native_steal_clock(int cpu)
 
 /* These are in entry.S */
 extern void native_iret(void);
-extern void native_irq_enable_sysexit(void);
 extern void native_usergs_sysret32(void);
 extern void native_usergs_sysret64(void);
 
@@ -379,9 +375,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
 
        .load_sp0 = native_load_sp0,
 
-#if defined(CONFIG_X86_32)
-       .irq_enable_sysexit = native_irq_enable_sysexit,
-#endif
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_IA32_EMULATION
        .usergs_sysret32 = native_usergs_sysret32,
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index c89f50a76e972d8f58cd4b686333163dcadfd8b3..158dc0650d5dfac1b8755910b30612d6cbb9fdbb 100644
@@ -5,7 +5,6 @@ DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
 DEF_NATIVE(pv_cpu_ops, iret, "iret");
-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
@@ -46,7 +45,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_irq_ops, restore_fl);
                PATCH_SITE(pv_irq_ops, save_fl);
                PATCH_SITE(pv_cpu_ops, iret);
-               PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
                PATCH_SITE(pv_mmu_ops, read_cr2);
                PATCH_SITE(pv_mmu_ops, read_cr3);
                PATCH_SITE(pv_mmu_ops, write_cr3);
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index 8aa05583bc42dec102b3fffb63d2fa6a828eee4c..17c00f80108cffe5977db15bfd110b109739434f 100644
@@ -13,7 +13,6 @@ DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
 
-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "swapgs; sti; sysexit");
 DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
 DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
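
The paravirt_patch_32.c and paravirt_patch_64.c hunks above drop the DEF_NATIVE()/PATCH_SITE()
entries that let the boot-time patcher overwrite the irq_enable_sysexit call site with the raw
native opcodes ("sti; sysexit" on 32-bit). As a rough, standalone illustration of that patching
step (simplified semantics, not the kernel's actual paravirt_patch_insns() implementation): if
the native byte sequence fits in the reserved patch site, copy it in; otherwise leave the
indirect call in place.

/* Standalone sketch of native-opcode site patching; illustrative only. */
#include <stdio.h>
#include <string.h>

/*
 * Copy the native replacement bytes into the patch site if they fit,
 * returning how many bytes were written (0 means "leave the indirect
 * call/jmp in place").
 */
static unsigned int patch_site(unsigned char *site, unsigned int site_len,
			       const unsigned char *native, unsigned int native_len)
{
	if (native_len > site_len)
		return 0;
	memcpy(site, native, native_len);
	return native_len;
}

int main(void)
{
	unsigned char site[8];
	/* sti = 0xfb, sysexit = 0x0f 0x35 -- the sequence DEF_NATIVE used
	 * to emit for irq_enable_sysexit on 32-bit. */
	const unsigned char sti_sysexit[] = { 0xfb, 0x0f, 0x35 };

	memset(site, 0x90, sizeof(site));	/* pad the site with NOPs */
	printf("patched %u bytes\n",
	       patch_site(site, sizeof(site), sti_sysexit, sizeof(sti_sysexit)));
	return 0;
}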
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index d315151411e56a7ea36bf01807d119df52316bc3..a068e36382b7b522595ac0d74ec36d3d60566739 100644
@@ -1229,10 +1229,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 
        .iret = xen_iret,
 #ifdef CONFIG_X86_64
-       .usergs_sysret32 = xen_sysret32,
        .usergs_sysret64 = xen_sysret64,
-#else
-       .irq_enable_sysexit = xen_sysexit,
 #endif
 
        .load_tr_desc = paravirt_nop,
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index fd92a64d748e1ca1a3838fe3c26f977f41a8b984..feb6d40a0860f450d5e2c29f2920a139c98794f8 100644
@@ -34,20 +34,6 @@ check_events:
        pop %eax
        ret
 
-/*
- * We can't use sysexit directly, because we're not running in ring0.
- * But we can easily fake it up using iret.  Assuming xen_sysexit is
- * jumped to with a standard stack frame, we can just strip it back to
- * a standard iret frame and use iret.
- */
-ENTRY(xen_sysexit)
-       movl PT_EAX(%esp), %eax                 /* Shouldn't be necessary? */
-       orl $X86_EFLAGS_IF, PT_EFLAGS(%esp)
-       lea PT_EIP(%esp), %esp
-
-       jmp xen_iret
-ENDPROC(xen_sysexit)
-
 /*
  * This is run where a normal iret would be run, with the same stack setup:
  *     8: eflags
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 1399423f34183dc3bb899180e264a1198dc93192..4140b070f2e991e1b56152056ae78be9f2899244 100644
@@ -139,9 +139,6 @@ DECL_ASM(void, xen_restore_fl_direct, unsigned long);
 
 /* These are not functions, and cannot be called normally */
 __visible void xen_iret(void);
-#ifdef CONFIG_X86_32
-__visible void xen_sysexit(void);
-#endif
 __visible void xen_sysret32(void);
 __visible void xen_sysret64(void);
 __visible void xen_adjust_exception_frame(void);