include/kvm/arm_vgic.h
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef __ASM_ARM_KVM_VGIC_H
#define __ASM_ARM_KVM_VGIC_H

#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define VGIC_NR_IRQS_LEGACY	256
#define VGIC_NR_SGIS		16
#define VGIC_NR_PPIS		16
#define VGIC_NR_PRIVATE_IRQS	(VGIC_NR_SGIS + VGIC_NR_PPIS)

#define VGIC_V2_MAX_LRS		(1 << 6)
#define VGIC_V3_MAX_LRS		16
#define VGIC_MAX_IRQS		1024
#define VGIC_V2_MAX_CPUS	8

/* Sanity checks... */
#if (KVM_MAX_VCPUS > 255)
#error Too many KVM VCPUs, the VGIC only supports up to 255 VCPUs for now
#endif

#if (VGIC_NR_IRQS_LEGACY & 31)
#error "VGIC_NR_IRQS must be a multiple of 32"
#endif

#if (VGIC_NR_IRQS_LEGACY > VGIC_MAX_IRQS)
#error "VGIC_NR_IRQS must be <= 1024"
#endif

/*
 * The GIC distributor registers describing interrupts have two parts:
 * - 32 per-CPU interrupts (SGI + PPI)
 * - a bunch of shared interrupts (SPI)
 */
struct vgic_bitmap {
	/*
	 * - One UL per VCPU for private interrupts (assumes UL is at
	 *   least 32 bits)
	 * - As many UL as necessary for shared interrupts.
	 *
	 * The private interrupts are accessed via the "private"
	 * field, one UL per vcpu (the state for vcpu n is in
	 * private[n]). The shared interrupts are accessed via the
	 * "shared" pointer (IRQn state is at bit n-32 in the bitmap).
	 */
	unsigned long *private;
	unsigned long *shared;
};

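/*
 * Illustrative sketch (not part of the upstream header, helper name is
 * hypothetical): how per-IRQ state is looked up in a struct vgic_bitmap,
 * following the layout described above. "irq" is the full interrupt number.
 */
static inline int example_vgic_bitmap_get(struct vgic_bitmap *vb,
					  int vcpu_id, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		/* SGIs/PPIs: bit "irq" of the VCPU's private word */
		return test_bit(irq, &vb->private[vcpu_id]);

	/* SPIs: shared bitmap, IRQn lives at bit n - 32 */
	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, vb->shared);
}
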
struct vgic_bytemap {
	/*
	 * - 8 u32 per VCPU for private interrupts
	 * - As many u32 as necessary for shared interrupts.
	 *
	 * The private interrupts are accessed via the "private"
	 * field (the state for vcpu n is in private[n*8] to
	 * private[n*8 + 7]). The shared interrupts are accessed via
	 * the "shared" pointer (IRQn state is at byte (n-32)%4 of the
	 * shared[(n-32)/4] word).
	 */
	u32 *private;
	u32 *shared;
};

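/*
 * Illustrative sketch (not part of the upstream header, helper name is
 * hypothetical): extracting the per-IRQ byte (e.g. a priority) from a
 * struct vgic_bytemap, following the layout described above.
 */
static inline u8 example_vgic_bytemap_get(struct vgic_bytemap *vb,
					  int vcpu_id, int irq)
{
	u32 word;

	if (irq < VGIC_NR_PRIVATE_IRQS)
		/* 8 u32s (32 bytes) of private state per VCPU */
		word = vb->private[vcpu_id * 8 + irq / 4];
	else
		/* IRQn (n >= 32) lives in shared[(n - 32) / 4] */
		word = vb->shared[(irq - VGIC_NR_PRIVATE_IRQS) / 4];

	/* byte (irq % 4) of that word, mirroring the GICD byte registers */
	return (word >> ((irq % 4) * 8)) & 0xff;
}
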
struct kvm_vcpu;

enum vgic_type {
	VGIC_V2,		/* Good ol' GICv2 */
	VGIC_V3,		/* New fancy GICv3 */
};

#define LR_STATE_PENDING	(1 << 0)
#define LR_STATE_ACTIVE		(1 << 1)
#define LR_STATE_MASK		(3 << 0)
#define LR_EOI_INT		(1 << 2)

struct vgic_lr {
	u16 irq;
	u8 source;
	u8 state;
};

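/*
 * Illustrative sketch (not part of the upstream header): struct vgic_lr is
 * the architecture-neutral view of one list register, with its state field
 * built from the LR_* flags above. A pending SGI 3 from VCPU 1 (the source
 * field is meaningful for GICv2 SGIs only) would be described as:
 */
static inline struct vgic_lr example_pending_sgi_lr(void)
{
	struct vgic_lr lr = {
		.irq	= 3,			/* SGI number (0..15) */
		.source	= 1,			/* originating VCPU */
		.state	= LR_STATE_PENDING,	/* pending, not active, no EOI notification */
	};

	return lr;
}
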
struct vgic_vmcr {
	u32 ctlr;
	u32 abpr;
	u32 bpr;
	u32 pmr;
};

struct vgic_ops {
	struct vgic_lr (*get_lr)(const struct kvm_vcpu *, int);
	void	(*set_lr)(struct kvm_vcpu *, int, struct vgic_lr);
	void	(*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
	u64	(*get_elrsr)(const struct kvm_vcpu *vcpu);
	u64	(*get_eisr)(const struct kvm_vcpu *vcpu);
	u32	(*get_interrupt_status)(const struct kvm_vcpu *vcpu);
	void	(*enable_underflow)(struct kvm_vcpu *vcpu);
	void	(*disable_underflow)(struct kvm_vcpu *vcpu);
	void	(*get_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void	(*set_vmcr)(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
	void	(*enable)(struct kvm_vcpu *vcpu);
};

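/*
 * Illustrative sketch (not part of the upstream header, names "ops" and
 * "lr_nr" are assumptions): common vgic code reaches the GICv2/GICv3
 * backends only through a struct vgic_ops instance, e.g. to rewrite one
 * list register:
 */
static inline void example_clear_lr_eoi(const struct vgic_ops *ops,
					struct kvm_vcpu *vcpu, int lr_nr)
{
	struct vgic_lr lr = ops->get_lr(vcpu, lr_nr);

	lr.state &= ~LR_EOI_INT;
	ops->set_lr(vcpu, lr_nr, lr);
}
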
struct vgic_params {
	/* vgic type */
	enum vgic_type type;
	/* Physical address of vgic virtual cpu interface */
	phys_addr_t vcpu_base;
	/* Number of list registers */
	u32 nr_lr;
	/* Interrupt number */
	unsigned int maint_irq;
	/* Virtual control interface base address */
	void __iomem *vctrl_base;
	int max_gic_vcpus;
	/* Only needed for the legacy KVM_CREATE_IRQCHIP */
	bool can_emulate_gicv2;
};

struct vgic_vm_ops {
	bool	(*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
			       struct kvm_exit_mmio *);
	bool	(*queue_sgi)(struct kvm_vcpu *, int irq);
	void	(*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
	int	(*init_model)(struct kvm *);
	int	(*map_resources)(struct kvm *, const struct vgic_params *);
};

struct vgic_dist {
#ifdef CONFIG_KVM_ARM_VGIC
	spinlock_t lock;
	bool in_kernel;
	bool ready;

	/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
	u32 vgic_model;

	int nr_cpus;
	int nr_irqs;

	/* Virtual control interface mapping */
	void __iomem *vctrl_base;

	/* Distributor and vcpu interface mapping in the guest */
	phys_addr_t vgic_dist_base;
	/* GICv2 and GICv3 use different mapped register blocks */
	union {
		phys_addr_t vgic_cpu_base;
		phys_addr_t vgic_redist_base;
	};

	/* Distributor enabled */
	u32 enabled;

	/* Interrupt enabled (one bit per IRQ) */
	struct vgic_bitmap irq_enabled;

	/* Level-triggered interrupt external input is asserted */
	struct vgic_bitmap irq_level;

	/* Interrupt state is pending on the distributor */
	struct vgic_bitmap irq_pending;

	/*
	 * Tracks writes to GICD_ISPENDRn and GICD_ICPENDRn for level-triggered
	 * interrupts.  Essentially holds the state of the flip-flop in
	 * Figure 4-10 on page 4-101 in ARM IHI 0048B.b.
	 * Once set, it is only cleared for level-triggered interrupts on
	 * guest ACKs (when we queue it) or writes to GICD_ICPENDRn.
	 */
	struct vgic_bitmap irq_soft_pend;

	/* Level-triggered interrupt queued on VCPU interface */
	struct vgic_bitmap irq_queued;

	/* Interrupt priority. Not used yet. */
	struct vgic_bytemap irq_priority;

	/* Level/edge triggered */
	struct vgic_bitmap irq_cfg;

	/*
	 * Source CPU per SGI and target CPU:
	 *
	 * Each byte represents an SGI observable on a VCPU, each bit of
	 * this byte indicating whether the corresponding VCPU has
	 * generated this interrupt. This is a GICv2 feature only.
	 *
	 * For VCPUn (n < 8), irq_sgi_sources[n*16] to [n*16 + 15] are
	 * the SGIs observable on VCPUn.
	 */
	u8 *irq_sgi_sources;

	/*
	 * Target CPU for each SPI:
	 *
	 * Array of available SPIs, each byte indicating the target
	 * VCPU for that SPI. IRQn (n >= 32) is at irq_spi_cpu[n-32].
	 */
	u8 *irq_spi_cpu;

	/*
	 * Reverse lookup of irq_spi_cpu for faster pending computation:
	 *
	 * Array of bitmaps, one per VCPU, describing whether IRQn is
	 * routed to a particular VCPU.
	 */
	struct vgic_bitmap *irq_spi_target;

	/* Target MPIDR for each IRQ (needed for GICv3 IROUTERn only) */
	u32 *irq_spi_mpidr;

	/* Bitmap indicating which CPU has something pending */
	unsigned long *irq_pending_on_cpu;

	struct vgic_vm_ops vm_ops;
#endif
};

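#ifdef CONFIG_KVM_ARM_VGIC
/*
 * Illustrative sketches (not part of the upstream header, helper names are
 * hypothetical): how the per-IRQ arrays in struct vgic_dist are indexed.
 */

/* Bitmask of VCPUs that have raised SGI "sgi" towards VCPU "vcpu_id" (GICv2) */
static inline u8 example_sgi_sources(struct vgic_dist *dist, int vcpu_id, int sgi)
{
	return dist->irq_sgi_sources[vcpu_id * VGIC_NR_SGIS + sgi];
}

/* Target VCPU of SPI "irq" (irq >= VGIC_NR_PRIVATE_IRQS) */
static inline u8 example_spi_target(struct vgic_dist *dist, int irq)
{
	return dist->irq_spi_cpu[irq - VGIC_NR_PRIVATE_IRQS];
}
#endif
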
struct vgic_v2_cpu_if {
	u32 vgic_hcr;
	u32 vgic_vmcr;
	u32 vgic_misr;	/* Saved only */
	u64 vgic_eisr;	/* Saved only */
	u64 vgic_elrsr;	/* Saved only */
	u32 vgic_apr;
	u32 vgic_lr[VGIC_V2_MAX_LRS];
};

struct vgic_v3_cpu_if {
#ifdef CONFIG_ARM_GIC_V3
	u32 vgic_hcr;
	u32 vgic_vmcr;
	u32 vgic_sre;	/* Restored only, change ignored */
	u32 vgic_misr;	/* Saved only */
	u32 vgic_eisr;	/* Saved only */
	u32 vgic_elrsr;	/* Saved only */
	u32 vgic_ap0r[4];
	u32 vgic_ap1r[4];
	u64 vgic_lr[VGIC_V3_MAX_LRS];
#endif
};

struct vgic_cpu {
#ifdef CONFIG_KVM_ARM_VGIC
	/* per IRQ to LR mapping */
	u8 *vgic_irq_lr_map;

	/* Pending interrupts on this VCPU */
	DECLARE_BITMAP(pending_percpu, VGIC_NR_PRIVATE_IRQS);
	unsigned long *pending_shared;

	/* Bitmap of used/free list registers */
	DECLARE_BITMAP(lr_used, VGIC_V2_MAX_LRS);

	/* Number of list registers on this CPU */
	int nr_lr;

	/* CPU vif control registers for world switch */
	union {
		struct vgic_v2_cpu_if vgic_v2;
		struct vgic_v3_cpu_if vgic_v3;
	};
#endif
};

#define LR_EMPTY	0xff

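#ifdef CONFIG_KVM_ARM_VGIC
/*
 * Illustrative sketch (not part of the upstream header, helper name is
 * hypothetical): vgic_irq_lr_map records which list register an interrupt
 * currently occupies, with LR_EMPTY meaning "not resident in any LR".
 */
static inline bool example_irq_in_lr(struct vgic_cpu *vgic_cpu, int irq)
{
	return vgic_cpu->vgic_irq_lr_map[irq] != LR_EMPTY;
}
#endif
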
#define INT_STATUS_EOI		(1 << 0)
#define INT_STATUS_UNDERFLOW	(1 << 1)

struct kvm;
struct kvm_vcpu;
struct kvm_run;
struct kvm_exit_mmio;

#ifdef CONFIG_KVM_ARM_VGIC
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
int kvm_vgic_hyp_init(void);
int kvm_vgic_map_resources(struct kvm *kvm);
int kvm_vgic_get_max_vcpus(void);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio);

#define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
#define vgic_ready(k)		((k)->arch.vgic.ready)

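/*
 * Illustrative sketch (not part of the upstream header, "my_spi" is an
 * assumption): an in-kernel device model raising and then dropping the level
 * of a level-triggered SPI via kvm_vgic_inject_irq(). The cpuid argument
 * names the VCPU associated with the request; shared interrupts are
 * delivered according to the distributor's routing.
 */
static inline void example_pulse_spi(struct kvm *kvm, unsigned int my_spi)
{
	kvm_vgic_inject_irq(kvm, 0, my_spi, true);	/* line asserted */
	kvm_vgic_inject_irq(kvm, 0, my_spi, false);	/* line deasserted */
}
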
int vgic_v2_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params);
#ifdef CONFIG_ARM_GIC_V3
int vgic_v3_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params);
#else
static inline int vgic_v3_probe(struct device_node *vgic_node,
				const struct vgic_ops **ops,
				const struct vgic_params **params)
{
	return -ENODEV;
}
#endif

#else
static inline int kvm_vgic_hyp_init(void)
{
	return 0;
}

static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 addr)
{
	return 0;
}

static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	return -ENXIO;
}

static inline int kvm_vgic_map_resources(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_vgic_create(struct kvm *kvm, u32 type)
{
	return 0;
}

static inline void kvm_vgic_destroy(struct kvm *kvm)
{
}

static inline void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
}

static inline int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) {}

static inline int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid,
				      unsigned int irq_num, bool level)
{
	return 0;
}

static inline int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    struct kvm_exit_mmio *mmio)
{
	return false;
}

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return 0;
}

static inline bool vgic_initialized(struct kvm *kvm)
{
	return true;
}

static inline bool vgic_ready(struct kvm *kvm)
{
	return true;
}

static inline int kvm_vgic_get_max_vcpus(void)
{
	return KVM_MAX_VCPUS;
}
#endif

#endif