/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"

/* common helpers */

int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
		      phys_addr_t addr, phys_addr_t alignment)
{
	if (addr & ~KVM_PHYS_MASK)
		return -E2BIG;

	if (!IS_ALIGNED(addr, alignment))
		return -EINVAL;

	if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
		return -EEXIST;

	return 0;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @type:  the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr:  pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int type_needed;
	phys_addr_t *addr_ptr, alignment;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
		addr_ptr = &vgic->vgic_cpu_base;
		alignment = SZ_4K;
		break;
#ifdef CONFIG_KVM_ARM_VGIC_V3
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_64K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_redist_base;
		alignment = SZ_64K;
		break;
#endif
	default:
		r = -ENODEV;
		goto out;
	}

	if (vgic->vgic_model != type_needed) {
		r = -ENODEV;
		goto out;
	}

	if (write) {
		r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
		if (!r)
			*addr_ptr = *addr;
	} else {
		*addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
}
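
/*
 * Illustrative only: a minimal userspace sketch (kept in a comment so it does
 * not affect this translation unit) of how the base addresses handled above
 * are typically programmed through the KVM device API. The vm_fd variable and
 * the 0x08000000 base address are assumptions for the example; the ioctls and
 * structures come from <linux/kvm.h>.
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V2 };
 *	struct kvm_device_attr attr;
 *	__u64 dist_base = 0x08000000;		// 4K-aligned guest-physical address
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	// cd.fd now holds the vgic device fd
 *
 *	attr.flags = 0;
 *	attr.group = KVM_DEV_ARM_VGIC_GRP_ADDR;
 *	attr.attr  = KVM_VGIC_V2_ADDR_TYPE_DIST;
 *	attr.addr  = (__u64)(unsigned long)&dist_base;
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);	// ends up in kvm_vgic_addr(..., write=true)
 */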

static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
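
/*
 * Illustrative only: what a well-formed KVM_DEV_ARM_VGIC_GRP_NR_IRQS write
 * looks like from userspace, given the checks above. The value is the total
 * number of interrupts (private IRQs included), so asking for 96 SPIs means
 * writing 96 + VGIC_NR_PRIVATE_IRQS = 128, which is at least 64, a multiple
 * of 32, and must be set before the vgic is initialized (otherwise -EBUSY).
 * The vgic_fd variable (the device fd from KVM_CREATE_DEVICE) is an
 * assumption of the sketch.
 *
 *	__u32 nr_irqs = 128;			// 96 SPIs + 32 private interrupts
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
 *		.attr  = 0,
 *		.addr  = (__u64)(unsigned long)&nr_irqs,
 *	};
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);	// stores nr_spis = 96
 */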

static int vgic_get_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_spis +
			     VGIC_NR_PRIVATE_IRQS, uaddr);
		break;
	}
	}

	return r;
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

int kvm_register_vgic_device(unsigned long type)
{
	int ret = -ENODEV;

	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
					      KVM_DEV_TYPE_ARM_VGIC_V2);
		break;
#ifdef CONFIG_KVM_ARM_VGIC_V3
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
					      KVM_DEV_TYPE_ARM_VGIC_V3);
		if (ret)
			break;
		ret = kvm_vgic_register_its_device();
		break;
#endif
	}

	return ret;
}

/**
 * vgic_attr_regs_access - allow user space to read or write VGIC registers
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @reg:      pointer to the register value to be read or written
 * @is_write: true for a write access, false for a read access
 */
static int vgic_attr_regs_access(struct kvm_device *dev,
				 struct kvm_device_attr *attr,
				 u32 *reg, bool is_write)
{
	gpa_t addr;
	int cpuid, ret, c;
	struct kvm_vcpu *vcpu, *tmp_vcpu;
	int vcpu_lock_idx = -1;

	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
	addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

	mutex_lock(&dev->kvm->lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex.  By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run and fiddle with the vgic state while we
	 * access it.
	 */
	ret = -EBUSY;
	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex))
			goto out;
		vcpu_lock_idx = c;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
		mutex_unlock(&tmp_vcpu->mutex);
	}

	mutex_unlock(&dev->kvm->lock);
	return ret;
}
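
/*
 * Illustrative only: how userspace encodes attr->attr for the register access
 * path above. The vcpu index goes in the field selected by
 * KVM_DEV_ARM_VGIC_CPUID_MASK/KVM_DEV_ARM_VGIC_CPUID_SHIFT, the register
 * offset in KVM_DEV_ARM_VGIC_OFFSET_MASK, and the payload is a single u32
 * behind attr->addr. The vgic_fd variable and the 0x8 offset (GICD_IIDR on a
 * GICv2 distributor) are assumptions for the example.
 *
 *	__u32 reg;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
 *		.attr  = ((__u64)0 << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 0x8,	// vcpu 0, offset 0x8
 *		.addr  = (__u64)(unsigned long)&reg,
 *	};
 *	ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);	// reaches vgic_attr_regs_access(..., is_write=false)
 */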

/* V2 ops */

static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_attr_regs_access(dev, attr, &reg, true);
	}
	}

	return -ENXIO;
}

static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 reg = 0;

		ret = vgic_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	}

	return -ENXIO;
}

static int vgic_v2_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return vgic_v2_has_attr_regs(dev, attr);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};

/* V3 ops */

#ifdef CONFIG_KVM_ARM_VGIC_V3

static int vgic_v3_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	return vgic_set_common_attr(dev, attr);
}

static int vgic_v3_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	return vgic_get_common_attr(dev, attr);
}

static int vgic_v3_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V3_ADDR_TYPE_DIST:
		case KVM_VGIC_V3_ADDR_TYPE_REDIST:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};

#endif /* CONFIG_KVM_ARM_VGIC_V3 */