/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;

	spin_lock(&dist->lpi_list_lock);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	spin_unlock(&dist->lpi_list_lock);

	return irq;
}

struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_itte {
	struct list_head itte_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 lpi;
	u32 event_id;
};

/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_itte *find_itte(struct vgic_its *its, u32 device_id,
				  u32 event_id)
{
	struct its_device *device;
	struct its_itte *itte;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(itte, &device->itt_head, itte_list)
		if (itte->event_id == event_id)
			return itte;

	return NULL;
}

/* To be used as an iterator this macro misses the enclosing parentheses */
#define for_each_lpi_its(dev, itte, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(itte, &(dev)->itt_head, itte_list)
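
/*
 * Illustrative only: since for_each_lpi_its() expands to two nested
 * list_for_each_entry() loops, a hypothetical caller iterates over all
 * ITTEs of all devices on an ITS like this:
 *
 *	struct its_device *dev;
 *	struct its_itte *itte;
 *
 *	for_each_lpi_its(dev, itte, its)
 *		do_something(dev, itte);
 *
 * A braced loop body works just as well; only the macro definition itself
 * cannot be wrapped in enclosing parentheses, as the comment above notes.
 */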

/*
 * We only implement 48 bits of PA at the moment, although the ITS
 * supports more. Let's be restrictive here.
 */
#define BASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define CBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))
#define PENDBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 16))
#define PROPBASER_ADDRESS(x)	((x) & GENMASK_ULL(47, 12))

#define GIC_LPI_OFFSET 8192
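
/*
 * A worked example, derived from update_lpi_config() below rather than
 * stated anywhere in this file: LPI INTIDs start at 8192, and the one-byte
 * configuration entry for INTID n lives at
 * PROPBASER_ADDRESS(propbaser) + (n - GIC_LPI_OFFSET), so INTID 8192 maps
 * to offset 0 of the property table and INTID 8195 to offset 3.
 */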

/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}

#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)
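
/*
 * For illustration (the values follow directly from the masks above):
 * a property byte of 0xa5 decodes to priority 0xa4 (bits [7:2]) with the
 * enable bit (bit 0) set, while 0xa4 would be the same priority disabled.
 */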

/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, the update is applied only if the IRQ is
 * targeting this VCPU; if it is NULL, the update is applied
 * unconditionally.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu)
{
	u64 propbase = PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;

	ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
			     &prop, 1);

	if (ret)
		return ret;

	spin_lock(&irq->irq_lock);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		vgic_queue_irq_unlock(kvm, irq);
	} else {
		spin_unlock(&irq->irq_lock);
	}

	return 0;
}

/*
 * Create a snapshot of the current LPI list, so that we can enumerate all
 * LPIs without holding any lock.
 * Returns the array length and puts the kmalloc'ed array into intid_ptr.
 */
static int vgic_copy_lpi_list(struct kvm *kvm, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count = dist->lpi_list_count, i = 0;

	/*
	 * We use the current value of the list length, which may change
	 * after the kmalloc. We don't care, because the guest shouldn't
	 * change anything while the command handling is still running,
	 * and in the worst case we would miss a new IRQ, which one wouldn't
	 * expect to be covered by this command anyway.
	 */
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	spin_lock(&dist->lpi_list_lock);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		intids[i] = irq->intid;
		if (++i == irq_count)
			break;
	}
	spin_unlock(&dist->lpi_list_lock);

	*intid_ptr = intids;
	return irq_count;
}

/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this
 * LPI is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_itte(struct kvm *kvm, struct its_itte *itte)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(itte->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);

	spin_lock(&itte->irq->irq_lock);
	itte->irq->target_vcpu = vcpu;
	spin_unlock(&itte->irq->irq_lock);
}

/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_itte *itte;

	for_each_lpi_its(device, itte, its) {
		if (!itte->collection || coll != itte->collection)
			continue;

		update_affinity_itte(kvm, itte);
	}
}

static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}
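
/*
 * Worked example (illustrative): GICR_PROPBASER.IDbits occupies the low
 * five bits and encodes "number of INTID bits minus one". A field value
 * of 13 thus allows 2^14 = 16384 interrupt IDs, i.e. LPIs 8192..16383.
 * The result is capped at INTERRUPT_ID_BITS_ITS to bound our own tables.
 */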

/*
 * Scan the whole LPI pending table and sync the pending bit in there
 * with our own data structures. This relies on the LPIs being mapped
 * beforehand.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;
		u8 pendmask;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;
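
		/*
		 * Example (illustrative): INTID 8195 yields byte_offset
		 * 1024 and bit_nr 3, i.e. bit 3 of the 1025th byte of
		 * the pending table holds its pending state.
		 */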

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest(vcpu->kvm, pendbase + byte_offset,
					     &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		spin_lock(&irq->irq_lock);
		irq->pending = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}

static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 reg = 0;

	mutex_lock(&its->cmd_lock);
	if (its->creadr == its->cwriter)
		reg |= GITS_CTLR_QUIESCENT;
	if (its->enabled)
		reg |= GITS_CTLR_ENABLE;
	mutex_unlock(&its->cmd_lock);

	return reg;
}

static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	its->enabled = !!(val & GITS_CTLR_ENABLE);
}

static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits
	 * and DevBits low - at least for the time being.
	 */
	reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT;
	reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}

static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}

static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}

/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct kvm_vcpu *vcpu;
	struct its_itte *itte;

	if (!its->enabled)
		return -EBUSY;

	itte = find_itte(its, devid, eventid);
	if (!itte || !its_is_collection_mapped(itte->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, itte->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	spin_lock(&itte->irq->irq_lock);
	itte->irq->pending = true;
	vgic_queue_irq_unlock(kvm, itte->irq);

	return 0;
}

static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
{
	struct vgic_io_device *iodev;

	if (dev->ops != &kvm_io_gic_ops)
		return NULL;

	iodev = container_of(dev, struct vgic_io_device, dev);

	if (iodev->iodev_type != IODEV_ITS)
		return NULL;

	return iodev;
}

/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description, we return 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;
	int ret;

	if (!vgic_has_its(kvm))
		return -ENODEV;

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return -EINVAL;

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return -EINVAL;

	iodev = vgic_get_its_iodev(kvm_io_dev);
	if (!iodev)
		return -EINVAL;

	mutex_lock(&iodev->its->its_lock);
	ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
	mutex_unlock(&iodev->its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}
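
/*
 * For illustration only, a userspace caller would reach the function above
 * via the KVM_SIGNAL_MSI ioctl, roughly like this (the doorbell address,
 * device ID and event ID are hypothetical example values):
 *
 *	struct kvm_msi msi = {
 *		.address_lo = 0x08080000 + 0x10040,	// ITS base + GITS_TRANSLATER
 *		.address_hi = 0,
 *		.data       = 0,			// event ID
 *		.devid      = 42,			// device ID
 *		.flags      = KVM_MSI_VALID_DEVID,
 *	};
 *	ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
 */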

/* Requires the its_lock to be held. */
static void its_free_itte(struct kvm *kvm, struct its_itte *itte)
{
	list_del(&itte->itte_list);

	/* This put matches the get in vgic_add_lpi. */
	if (itte->irq)
		vgic_put_irq(kvm, itte->irq);

	kfree(itte);
}

static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)
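
/*
 * As an illustration (the field placement follows from the accessors
 * above): a 32-byte MAPTI command arrives as four little-endian
 * doublewords, with the command number 0x0a in DW0[7:0], the device ID
 * in DW0[63:32], the event ID in DW1[31:0], the physical LPI number in
 * DW1[63:32] and the collection ID in DW2[15:0].
 */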

/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_itte *itte;

	itte = find_itte(its, device_id, event_id);
	if (itte && itte->collection) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		its_free_itte(kvm, itte);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}

/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_itte *itte;
	struct its_collection *collection;

	itte = find_itte(its, device_id, event_id);
	if (!itte)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(itte->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	itte->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	spin_lock(&itte->irq->irq_lock);
	itte->irq->target_vcpu = vcpu;
	spin_unlock(&itte->irq->irq_lock);

	return 0;
}

/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	int index;
	u64 indirect_ptr;
	gfn_t gfn;

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / GITS_BASER_ENTRY_SIZE(baser)))
			return false;

		addr = BASER_ADDRESS(baser) + id * GITS_BASER_ENTRY_SIZE(baser);
		gfn = addr >> PAGE_SHIFT;

		return kvm_is_visible_gfn(its->dev->kvm, gfn);
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest(its->dev->kvm,
			   BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
			   &indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/*
	 * Mask the guest physical address and calculate the frame number.
	 * Any address beyond our supported 48 bits of PA will be caught
	 * by the actual check in the final step.
	 */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / GITS_BASER_ENTRY_SIZE(baser));
	indirect_ptr += index * GITS_BASER_ENTRY_SIZE(baser);
	gfn = indirect_ptr >> PAGE_SHIFT;

	return kvm_is_visible_gfn(its->dev->kvm, gfn);
}
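
/*
 * A worked example for the indirect case (illustrative numbers): with
 * 8-byte table entries, one 64K second-level page covers 8192 IDs, so
 * device ID 20000 selects first-level index 2 (20000 / 8192) and entry
 * 3616 (20000 % 8192) within the second-level page that this first-level
 * entry points to.
 */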

static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}

static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_itte *itte;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, itte, its)
		if (itte->collection &&
		    itte->collection->collection_id == coll_id)
			itte->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}

/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_itte *itte;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	int lpi_nr;
	struct vgic_irq *irq;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_itte(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	itte = kzalloc(sizeof(struct its_itte), GFP_KERNEL);
	if (!itte) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return -ENOMEM;
	}

	itte->event_id = event_id;
	list_add_tail(&itte->itte_list, &device->itt_head);

	itte->collection = collection;
	itte->lpi = lpi_nr;

	irq = vgic_add_lpi(kvm, lpi_nr);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_itte(kvm, itte);
		return PTR_ERR(irq);
	}
	itte->irq = irq;

	update_affinity_itte(kvm, itte);

	/*
	 * We "cache" the configuration table entries in our struct
	 * vgic_irq's. However we only have those structs for mapped IRQs,
	 * so we read in the respective config data from memory here upon
	 * mapping the LPI.
	 */
	update_lpi_config(kvm, itte->irq, NULL);

	return 0;
}

/* Requires the its_lock to be held. */
static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device)
{
	struct its_itte *itte, *temp;

	/*
	 * The spec says that unmapping a device with still valid
	 * ITTEs associated is UNPREDICTABLE. We remove all ITTEs,
	 * since we cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(itte, temp, &device->itt_head, itte_list)
		its_free_itte(kvm, itte);

	list_del(&device->dev_list);
	kfree(device);
}

/*
 * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	bool valid = its_cmd_get_validbit(its_cmd);
	struct its_device *device;

	if (!vgic_its_check_id(its, its->baser_device_table, device_id))
		return E_ITS_MAPD_DEVICE_OOR;

	device = find_its_device(its, device_id);

	/*
	 * The spec says that calling MAPD on an already mapped device
	 * invalidates all cached data for this device. We implement this
	 * by removing the mapping and re-establishing it.
	 */
	if (device)
		vgic_its_unmap_device(kvm, device);

	/*
	 * The spec does not say whether unmapping a not-mapped device
	 * is an error, so we are done in any case.
	 */
	if (!valid)
		return 0;

	device = kzalloc(sizeof(struct its_device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->device_id = device_id;
	INIT_LIST_HEAD(&device->itt_head);

	list_add_tail(&device->dev_list, &its->device_list);

	return 0;
}

/*
 * The MAPC command maps collection IDs to redistributors.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u16 coll_id;
	u32 target_addr;
	struct its_collection *collection;
	bool valid;

	valid = its_cmd_get_validbit(its_cmd);
	coll_id = its_cmd_get_collection(its_cmd);
	target_addr = its_cmd_get_target_addr(its_cmd);

	if (target_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MAPC_PROCNUM_OOR;

	if (!valid) {
		vgic_its_free_collection(its, coll_id);
	} else {
		collection = find_collection(its, coll_id);

		if (!collection) {
			int ret;

			ret = vgic_its_alloc_collection(its, &collection,
							coll_id);
			if (ret)
				return ret;
			collection->target_addr = target_addr;
		} else {
			collection->target_addr = target_addr;
			update_affinity_collection(kvm, its, collection);
		}
	}

	return 0;
}

/*
 * The CLEAR command removes the pending state for a particular LPI.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
				     u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_itte *itte;

	itte = find_itte(its, device_id, event_id);
	if (!itte)
		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;

	itte->irq->pending = false;

	return 0;
}

/*
 * The INV command syncs the configuration bits from the memory table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_itte *itte;

	itte = find_itte(its, device_id, event_id);
	if (!itte)
		return E_ITS_INV_UNMAPPED_INTERRUPT;

	return update_lpi_config(kvm, itte->irq, NULL);
}

/*
 * The INVALL command requests flushing of all IRQ data in this collection.
 * Find the VCPU mapped to that collection, then iterate over the VM's list
 * of mapped LPIs and update the configuration for each IRQ which targets
 * the specified vcpu. The configuration will be read from the in-memory
 * configuration table.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_collection *collection;
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	u32 *intids;
	int irq_count, i;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_INVALL_UNMAPPED_COLLECTION;

	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq_count = vgic_copy_lpi_list(kvm, &intids);
	if (irq_count < 0)
		return irq_count;

	for (i = 0; i < irq_count; i++) {
		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
		update_lpi_config(kvm, irq, vcpu);
		vgic_put_irq(kvm, irq);
	}

	kfree(intids);

	return 0;
}

/*
 * The MOVALL command moves the pending state of all IRQs targeting one
 * redistributor to another. We don't hold the pending state in the VCPUs,
 * but in the IRQs instead, so there is really not much to do for us here.
 * However the spec says that no IRQ must target the old redistributor
 * afterwards, so we make sure that no LPI is using the associated
 * target_vcpu.
 * This command affects all LPIs in the system that target that
 * redistributor.
 */
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
				      u64 *its_cmd)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
	struct kvm_vcpu *vcpu1, *vcpu2;
	struct vgic_irq *irq;

	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
	    target2_addr >= atomic_read(&kvm->online_vcpus))
		return E_ITS_MOVALL_PROCNUM_OOR;

	if (target1_addr == target2_addr)
		return 0;

	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
	vcpu2 = kvm_get_vcpu(kvm, target2_addr);

	spin_lock(&dist->lpi_list_lock);

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		spin_lock(&irq->irq_lock);

		if (irq->target_vcpu == vcpu1)
			irq->target_vcpu = vcpu2;

		spin_unlock(&irq->irq_lock);
	}

	spin_unlock(&dist->lpi_list_lock);

	return 0;
}

/*
 * The INT command injects the LPI associated with that DevID/EvID pair.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	u32 msi_data = its_cmd_get_id(its_cmd);
	u64 msi_devid = its_cmd_get_deviceid(its_cmd);

	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
}

/*
 * This function is called with the its_cmd lock held, but the ITS data
 * structure lock dropped.
 */
static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
				   u64 *its_cmd)
{
	int ret = -ENODEV;

	mutex_lock(&its->its_lock);
	switch (its_cmd_get_command(its_cmd)) {
	case GITS_CMD_MAPD:
		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPC:
		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MAPTI:
		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVI:
		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
		break;
	case GITS_CMD_DISCARD:
		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
		break;
	case GITS_CMD_CLEAR:
		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
		break;
	case GITS_CMD_MOVALL:
		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
		break;
	case GITS_CMD_INT:
		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
		break;
	case GITS_CMD_INV:
		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
		break;
	case GITS_CMD_INVALL:
		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
		break;
	case GITS_CMD_SYNC:
		/* we ignore this command: we are in sync all of the time */
		ret = 0;
		break;
	}
	mutex_unlock(&its->its_lock);

	return ret;
}

static u64 vgic_sanitise_its_baser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
				  GITS_BASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
	reg &= ~GENMASK_ULL(15, 12);

	/* We support only one (ITS) page size: 64K */
	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;

	return reg;
}

static u64 vgic_sanitise_its_cbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
				  GITS_CBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	/*
	 * Sanitise the physical address to be 64k aligned.
	 * Also limit the physical addresses to 48 bits.
	 */
	reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));

	return reg;
}

static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cbaser, addr & 7, len);
}

static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
				       gpa_t addr, unsigned int len,
				       unsigned long val)
{
	/* When GITS_CTLR.Enable is 1, this register is RO. */
	if (its->enabled)
		return;

	mutex_lock(&its->cmd_lock);
	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
	its->creadr = 0;
	/*
	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
	 * it to CREADR to make sure we start with an empty command buffer.
	 */
	its->cwriter = its->creadr;
	mutex_unlock(&its->cmd_lock);
}

#define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
#define ITS_CMD_SIZE			32
#define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
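
/*
 * Example (follows from the macros above): GITS_CBASER.Size holds the
 * number of 4K pages minus one, so a Size field of 0 gives a 4K command
 * buffer holding 128 commands of 32 bytes each, and the maximum field
 * value of 0xff gives a 1MB buffer of 32768 commands.
 */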

/*
 * By writing to CWRITER the guest announces new commands to be processed.
 * To avoid any races in the first place, we take the its_cmd lock, which
 * protects our ring buffer variables, so that there is only one user
 * per ITS handling commands at a given time.
 */
static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
					gpa_t addr, unsigned int len,
					unsigned long val)
{
	gpa_t cbaser;
	u64 cmd_buf[4];
	u32 reg;

	if (!its)
		return;

	mutex_lock(&its->cmd_lock);

	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
	reg = ITS_CMD_OFFSET(reg);
	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
		mutex_unlock(&its->cmd_lock);
		return;
	}

	its->cwriter = reg;
	cbaser = CBASER_ADDRESS(its->cbaser);

	while (its->cwriter != its->creadr) {
		int ret = kvm_read_guest(kvm, cbaser + its->creadr,
					 cmd_buf, ITS_CMD_SIZE);
		/*
		 * If kvm_read_guest() fails, this could be due to the guest
		 * programming a bogus value in CBASER or something else going
		 * wrong from which we cannot easily recover.
		 * According to section 6.3.2 in the GICv3 spec we can just
		 * ignore that command then.
		 */
		if (!ret)
			vgic_its_handle_command(kvm, its, cmd_buf);

		its->creadr += ITS_CMD_SIZE;
		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
			its->creadr = 0;
	}

	mutex_unlock(&its->cmd_lock);
}

static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
						struct vgic_its *its,
						gpa_t addr, unsigned int len)
{
	return extract_bytes(its->cwriter, addr & 0x7, len);
}

static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	return extract_bytes(its->creadr, addr & 0x7, len);
}

#define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	u64 reg;

	switch (BASER_INDEX(addr)) {
	case 0:
		reg = its->baser_device_table;
		break;
	case 1:
		reg = its->baser_coll_table;
		break;
	default:
		reg = 0;
		break;
	}

	return extract_bytes(reg, addr & 7, len);
}

#define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
static void vgic_mmio_write_its_baser(struct kvm *kvm,
				      struct vgic_its *its,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u64 entry_size, device_type;
	u64 reg, *regptr, clearbits = 0;

	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
	if (its->enabled)
		return;

	switch (BASER_INDEX(addr)) {
	case 0:
		regptr = &its->baser_device_table;
		entry_size = 8;
		device_type = GITS_BASER_TYPE_DEVICE;
		break;
	case 1:
		regptr = &its->baser_coll_table;
		entry_size = 8;
		device_type = GITS_BASER_TYPE_COLLECTION;
		clearbits = GITS_BASER_INDIRECT;
		break;
	default:
		return;
	}

	reg = update_64bit_reg(*regptr, addr & 7, len, val);
	reg &= ~GITS_BASER_RO_MASK;
	reg &= ~clearbits;

	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
	reg |= device_type << GITS_BASER_TYPE_SHIFT;
	reg = vgic_sanitise_its_baser(reg);

	*regptr = reg;
}

#define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
{								\
	.reg_offset = off,					\
	.len = length,						\
	.access_flags = acc,					\
	.its_read = rd,						\
	.its_write = wr,					\
}

static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
			      gpa_t addr, unsigned int len, unsigned long val)
{
	/* Ignore */
}

static struct vgic_register_region its_registers[] = {
	REGISTER_ITS_DESC(GITS_CTLR,
		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IIDR,
		vgic_mmio_read_its_iidr, its_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_TYPER,
		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CBASER,
		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CWRITER,
		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_CREADR,
		vgic_mmio_read_its_creadr, its_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_BASER,
		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
		VGIC_ACCESS_32bit),
};

/* This is called on setting the LPI enable bit in the redistributor. */
void vgic_enable_lpis(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
		its_sync_lpi_pending_table(vcpu);
}

static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its)
{
	struct vgic_io_device *iodev = &its->iodev;
	int ret;

	if (!its->initialized)
		return -EBUSY;

	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base))
		return -ENXIO;

	iodev->regions = its_registers;
	iodev->nr_regions = ARRAY_SIZE(its_registers);
	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);

	iodev->base_addr = its->vgic_its_base;
	iodev->iodev_type = IODEV_ITS;
	iodev->its = its;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}

#define INITIAL_BASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
	 ((8ULL - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)			| \
	 GITS_BASER_PAGE_SIZE_64K)

#define INITIAL_PROPBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))

static int vgic_its_create(struct kvm_device *dev, u32 type)
{
	struct vgic_its *its;

	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
		return -ENODEV;

	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
	if (!its)
		return -ENOMEM;

	mutex_init(&its->its_lock);
	mutex_init(&its->cmd_lock);

	its->vgic_its_base = VGIC_ADDR_UNDEF;

	INIT_LIST_HEAD(&its->device_list);
	INIT_LIST_HEAD(&its->collection_list);

	dev->kvm->arch.vgic.has_its = true;
	its->initialized = false;
	its->enabled = false;
	its->dev = dev;

	its->baser_device_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
	its->baser_coll_table = INITIAL_BASER_VALUE |
		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;

	dev->private = its;

	return 0;
}

static void vgic_its_destroy(struct kvm_device *kvm_dev)
{
	struct kvm *kvm = kvm_dev->kvm;
	struct vgic_its *its = kvm_dev->private;
	struct its_device *dev;
	struct its_itte *itte;
	struct list_head *dev_cur, *dev_temp;
	struct list_head *cur, *temp;

	/*
	 * We may end up here without the lists ever having been initialized.
	 * Check this and bail out early to avoid dereferencing a NULL pointer.
	 */
	if (!its->device_list.next)
		return;

	mutex_lock(&its->its_lock);
	list_for_each_safe(dev_cur, dev_temp, &its->device_list) {
		dev = container_of(dev_cur, struct its_device, dev_list);
		list_for_each_safe(cur, temp, &dev->itt_head) {
			itte = container_of(cur, struct its_itte, itte_list);
			its_free_itte(kvm, itte);
		}
		list_del(dev_cur);
		kfree(dev);
	}

	list_for_each_safe(cur, temp, &its->collection_list) {
		list_del(cur);
		kfree(container_of(cur, struct its_collection, coll_list));
	}
	mutex_unlock(&its->its_lock);

	kfree(its);
}

static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
		break;
	}
	return -ENXIO;
}

static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
					addr, SZ_64K);
		if (ret)
			return ret;

		its->vgic_its_base = addr;

		return 0;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			its->initialized = true;

			return 0;
		}
		break;
	}
	return -ENXIO;
}
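
/*
 * Userspace flow, for illustration only (the fds and the base address
 * are hypothetical example values): the ITS is created with
 * KVM_CREATE_DEVICE and then placed and initialized via
 * KVM_SET_DEVICE_ATTR, which lands in vgic_its_set_attr() above:
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_ITS,
 *	};
 *	u64 its_base = 0x08080000;		// example GPA, 64K aligned
 *	struct kvm_device_attr addr_attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_ITS_ADDR_TYPE,
 *		.addr  = (u64)&its_base,
 *	};
 *	struct kvm_device_attr init_attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
 *		.attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
 *	};
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &addr_attr);
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &init_attr);
 */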

static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	default:
		return -ENXIO;
	}

	return 0;
}

static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}

/*
 * Registers all ITSes with the kvm_io_bus framework.
 * To follow the existing VGIC initialization sequence, this has to be
 * done as late as possible, just before the first VCPU runs.
 */
int vgic_register_its_iodevs(struct kvm *kvm)
{
	struct kvm_device *dev;
	int ret = 0;

	list_for_each_entry(dev, &kvm->devices, vm_node) {
		if (dev->ops != &kvm_arm_vgic_its_ops)
			continue;

		ret = vgic_register_its_iodev(kvm, dev->private);
		if (ret)
			return ret;
		/*
		 * We don't need to care about tearing down previously
		 * registered ITSes, as the kvm_io_bus framework removes
		 * them for us if the VM gets destroyed.
		 */
	}

	return ret;
}