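/*
 * arch/x86/xen/irq.c
 *
 * Xen paravirt interrupt-flag ops: the guest's "interrupts enabled"
 * state lives in the per-vcpu event-channel upcall mask rather than in
 * the real EFLAGS.IF bit.
 */
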
#include <linux/hardirq.h>

#include <asm/x86_init.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <xen/features.h>
#include <xen/events.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
	(void)HYPERVISOR_xen_version(0, NULL);
}

asmlinkage __visible unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = this_cpu_read(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
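	/*
	 * Worked example: with evtchn_upcall_mask == 0 (events deliverable)
	 * flags == 1, so (-flags) == ~0UL and the result is X86_EFLAGS_IF
	 * (0x200), i.e. "IF set".  With the mask set, flags == 0 and the
	 * result is 0, i.e. "IF clear".
	 */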
	return (-flags) & X86_EFLAGS_IF;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);

__visible void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* See xen_irq_enable() for why preemption must be disabled. */
	preempt_disable();
	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;

	if (flags == 0) {
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
		preempt_enable();
	} else
		preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
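
/*
 * Illustrative sketch (assuming the standard CONFIG_PARAVIRT wiring; this
 * is not code from this file): the generic irqflags helpers dispatch to
 * the callee-save thunks above, roughly:
 *
 *	unsigned long flags = arch_local_irq_save();
 *					// xen_save_fl() then xen_irq_disable()
 *	...critical section...
 *	arch_local_irq_restore(flags);	// xen_restore_fl()
 */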

asmlinkage __visible void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
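	/*
	 * Note (explanatory, added): the no-resched variant below skips the
	 * reschedule check; scheduling with events freshly masked would be
	 * wrong here, and any pending reschedule is acted on once events
	 * are re-enabled.
	 */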
	preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);

asmlinkage __visible void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/*
	 * We may be preempted as soon as vcpu->evtchn_upcall_mask is
	 * cleared, so disable preemption to ensure we check for
	 * events on the VCPU we are still running on.
	 */
	preempt_disable();

	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();

	preempt_enable();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
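
/*
 * Note on SCHEDOP_block (summarizing the Xen interface): it blocks the
 * calling vcpu until an event is pending, and when invoked with upcalls
 * masked it atomically re-enables event delivery; hence the "implicit
 * local_irq_enable()" below.
 */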
static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}
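
/*
 * With events already masked, blocking would wrongly re-enable delivery
 * (see the note above), so the vcpu is instead taken offline outright
 * via VCPUOP_down; otherwise fall back to the safe blocking halt.
 */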
static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down,
				   xen_vcpu_nr(smp_processor_id()), NULL);
	else
		xen_safe_halt();
}
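
/*
 * The flag-handling entries are wrapped in PV_CALLEE_SAVE() so that call
 * sites use the register-preserving thunks generated above by
 * PV_CALLEE_SAVE_REGS_THUNK(), keeping these hot paths cheap.
 */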
static const struct pv_irq_ops xen_irq_ops __initconst = {
	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),

	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

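/*
 * Wire up the ops. This runs early in PV boot (the call site is assumed
 * to be xen_start_kernel()), before any interrupt handling is set up.
 */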
void __init xen_init_irq_ops(void)
{
	/* For PVH we use default pv_irq_ops settings. */
	if (!xen_feature(XENFEAT_hvm_callback_vector))
		pv_irq_ops = xen_irq_ops;
	x86_init.irqs.intr_init = xen_init_IRQ;
}