/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO PCI - User Level meta-driver"

static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");
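
/*
 * For example, loading the module with:
 *
 *        modprobe vfio-pci ids=10de:13ba,10de:0fbc
 *
 * registers the listed vendor:device pairs at init time.  (The IDs above
 * are arbitrary placeholders for illustration, not a recommendation.)
 */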

static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
                "Disable support for PCI 2.3 style INTx masking.  If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");

#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static bool disable_idle_d3;
module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_idle_d3,
                "Disable using the PCI D3 low power state for idle, unused devices");

static DEFINE_MUTEX(driver_lock);

static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
        return disable_vga;
#else
        return true;
#endif
}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself.  However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
        struct vfio_pci_device *vdev = opaque;
        struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
        unsigned char max_busnr;
        unsigned int decodes;

        if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
                       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

        max_busnr = pci_bus_max_busnr(pdev->bus);
        decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

        while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
                if (tmp == pdev ||
                    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
                    pci_is_root_bus(tmp->bus))
                        continue;

                if (tmp->bus->number >= pdev->bus->number &&
                    tmp->bus->number <= max_busnr) {
                        pci_dev_put(tmp);
                        decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
                        break;
                }
        }

        return decodes;
}

static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
        return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

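/*
 * Determine which BARs are candidates for mmap: MEM BARs of at least
 * PAGE_SIZE, plus page-aligned sub-page BARs for which the remainder of
 * the page can be reserved with a dummy resource.
 */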
static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
{
        struct resource *res;
        int bar;
        struct vfio_pci_dummy_resource *dummy_res;

        INIT_LIST_HEAD(&vdev->dummy_resources_list);

        for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
                res = vdev->pdev->resource + bar;

                if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
                        goto no_mmap;

                if (!(res->flags & IORESOURCE_MEM))
                        goto no_mmap;

                /*
                 * The PCI core shouldn't set up a resource with a
                 * type but zero size.  But there may be bugs that
                 * cause us to do that.
                 */
                if (!resource_size(res))
                        goto no_mmap;

                if (resource_size(res) >= PAGE_SIZE) {
                        vdev->bar_mmap_supported[bar] = true;
                        continue;
                }

                if (!(res->start & ~PAGE_MASK)) {
                        /*
                         * Add a dummy resource to reserve the remainder
                         * of the page, in case a hot-added device's BAR
                         * gets assigned into it.
                         */
                        dummy_res = kzalloc(sizeof(*dummy_res), GFP_KERNEL);
                        if (dummy_res == NULL)
                                goto no_mmap;

                        dummy_res->resource.name = "vfio sub-page reserved";
                        dummy_res->resource.start = res->end + 1;
                        dummy_res->resource.end = res->start + PAGE_SIZE - 1;
                        dummy_res->resource.flags = res->flags;
                        if (request_resource(res->parent,
                                             &dummy_res->resource)) {
                                kfree(dummy_res);
                                goto no_mmap;
                        }
                        dummy_res->index = bar;
                        list_add(&dummy_res->res_next,
                                 &vdev->dummy_resources_list);
                        vdev->bar_mmap_supported[bar] = true;
                        continue;
                }
                /*
                 * We don't handle the case where the BAR is not page
                 * aligned: there is no guarantee the BAR would be
                 * assigned to the same offset within a page in the
                 * guest when it is passed through, and userspace has
                 * no way to learn the BAR's offset within the page.
                 */
no_mmap:
                vdev->bar_mmap_supported[bar] = false;
        }
}

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);
static void vfio_pci_disable(struct vfio_pci_device *vdev);

/*
 * INTx masking requires the ability to disable INTx signaling via PCI_COMMAND
 * _and_ the ability to detect when the device is asserting INTx via PCI_STATUS.
 * If a device implements the former but not the latter we would typically
 * expect broken_intx_masking to be set and require an exclusive interrupt.
 * However, since we do have control of the device's ability to assert INTx,
 * we can instead pretend that the device does not implement INTx, virtualizing
 * the pin register to report zero and maintaining DisINTx set on the host.
 */
static bool vfio_pci_nointx(struct pci_dev *pdev)
{
        switch (pdev->vendor) {
        case PCI_VENDOR_ID_INTEL:
                switch (pdev->device) {
                /* All i40e (XL710/X710) 10/20/40GbE NICs */
                case 0x1572:
                case 0x1574:
                case 0x1580 ... 0x1581:
                case 0x1583 ... 0x1589:
                case 0x37d0 ... 0x37d2:
                        return true;
                default:
                        return false;
                }
        }

        return false;
}

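/*
 * Runs on first open of the device fd: wake the device to D0, take an
 * initial config space snapshot for later restore, probe the INTx and
 * MSI-X layout, and register any device-specific regions.
 */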
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
        struct pci_dev *pdev = vdev->pdev;
        int ret;
        u16 cmd;
        u8 msix_pos;

        pci_set_power_state(pdev, PCI_D0);

        /* Don't allow our initial saved state to include busmaster */
        pci_clear_master(pdev);

        ret = pci_enable_device(pdev);
        if (ret)
                return ret;

        vdev->reset_works = (pci_reset_function(pdev) == 0);
        pci_save_state(pdev);
        vdev->pci_saved_state = pci_store_saved_state(pdev);
        if (!vdev->pci_saved_state)
                pr_debug("%s: Couldn't store %s saved state\n",
                         __func__, dev_name(&pdev->dev));

        if (likely(!nointxmask)) {
                if (vfio_pci_nointx(pdev)) {
                        dev_info(&pdev->dev, "Masking broken INTx support\n");
                        vdev->nointx = true;
                        pci_intx(pdev, 0);
                } else
                        vdev->pci_2_3 = pci_intx_mask_supported(pdev);
        }

        pci_read_config_word(pdev, PCI_COMMAND, &cmd);
        if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
                cmd &= ~PCI_COMMAND_INTX_DISABLE;
                pci_write_config_word(pdev, PCI_COMMAND, cmd);
        }

        ret = vfio_config_init(vdev);
        if (ret) {
                kfree(vdev->pci_saved_state);
                vdev->pci_saved_state = NULL;
                pci_disable_device(pdev);
                return ret;
        }

        msix_pos = pdev->msix_cap;
        if (msix_pos) {
                u16 flags;
                u32 table;

                pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
                pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

                vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
                vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
                vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
        } else
                vdev->msix_bar = 0xFF;

        if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
                vdev->has_vga = true;

        if (vfio_pci_is_vga(pdev) &&
            pdev->vendor == PCI_VENDOR_ID_INTEL &&
            IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
                ret = vfio_pci_igd_init(vdev);
                if (ret) {
                        dev_warn(&vdev->pdev->dev,
                                 "Failed to setup Intel IGD regions\n");
                        vfio_pci_disable(vdev);
                        return ret;
                }
        }

        vfio_pci_probe_mmaps(vdev);

        return 0;
}

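/*
 * Runs on last close of the device fd: tear down interrupts and
 * device-specific regions, restore the config snapshot taken at enable
 * time, and try to reset the device so the next user starts clean.
 */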
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
        struct pci_dev *pdev = vdev->pdev;
        struct vfio_pci_dummy_resource *dummy_res, *tmp;
        int i, bar;

        /* Stop the device from further DMA */
        pci_clear_master(pdev);

        vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
                                VFIO_IRQ_SET_ACTION_TRIGGER,
                                vdev->irq_type, 0, 0, NULL);

        vdev->virq_disabled = false;

        for (i = 0; i < vdev->num_regions; i++)
                vdev->region[i].ops->release(vdev, &vdev->region[i]);

        vdev->num_regions = 0;
        kfree(vdev->region);
        vdev->region = NULL; /* don't krealloc a freed pointer */

        vfio_config_free(vdev);

        for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
                if (!vdev->barmap[bar])
                        continue;
                pci_iounmap(pdev, vdev->barmap[bar]);
                pci_release_selected_regions(pdev, 1 << bar);
                vdev->barmap[bar] = NULL;
        }

        list_for_each_entry_safe(dummy_res, tmp,
                                 &vdev->dummy_resources_list, res_next) {
                list_del(&dummy_res->res_next);
                release_resource(&dummy_res->resource);
                kfree(dummy_res);
        }

        vdev->needs_reset = true;

        /*
         * If we have saved state, restore it.  If we can reset the device,
         * even better.  Resetting with current state seems better than
         * nothing, but saving and restoring current state without reset
         * is just busy work.
         */
        if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
                pr_info("%s: Couldn't reload %s saved state\n",
                        __func__, dev_name(&pdev->dev));

                if (!vdev->reset_works)
                        goto out;

                pci_save_state(pdev);
        }

        /*
         * Disable INTx and MSI, presumably to avoid spurious interrupts
         * during reset.  Stolen from pci_reset_function()
         */
        pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

        /*
         * Try to reset the device.  The success of this is dependent on
         * being able to lock the device, which is not always possible.
         */
        if (vdev->reset_works && !pci_try_reset_function(pdev))
                vdev->needs_reset = false;

        pci_restore_state(pdev);
out:
        pci_disable_device(pdev);

        vfio_pci_try_bus_reset(vdev);

        if (!disable_idle_d3)
                pci_set_power_state(pdev, PCI_D3hot);
}

static void vfio_pci_release(void *device_data)
{
        struct vfio_pci_device *vdev = device_data;

        mutex_lock(&driver_lock);

        if (!(--vdev->refcnt)) {
                vfio_spapr_pci_eeh_release(vdev->pdev);
                vfio_pci_disable(vdev);
        }

        mutex_unlock(&driver_lock);

        module_put(THIS_MODULE);
}

static int vfio_pci_open(void *device_data)
{
        struct vfio_pci_device *vdev = device_data;
        int ret = 0;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        mutex_lock(&driver_lock);

        if (!vdev->refcnt) {
                ret = vfio_pci_enable(vdev);
                if (ret)
                        goto error;

                vfio_spapr_pci_eeh_open(vdev->pdev);
        }
        vdev->refcnt++;
error:
        mutex_unlock(&driver_lock);
        if (ret)
                module_put(THIS_MODULE);
        return ret;
}

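/*
 * Number of interrupts available per index: 1 for INTx (if a pin is
 * reported and not hidden by nointx), the vector count advertised by
 * the MSI/MSI-X capability, and 1 each for the ERR (PCIe only) and REQ
 * events.
 */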
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
        if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
                u8 pin;
                pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
                if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && !vdev->nointx && pin)
                        return 1;

        } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
                u8 pos;
                u16 flags;

                pos = vdev->pdev->msi_cap;
                if (pos) {
                        pci_read_config_word(vdev->pdev,
                                             pos + PCI_MSI_FLAGS, &flags);
                        return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
                }
        } else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
                u8 pos;
                u16 flags;

                pos = vdev->pdev->msix_cap;
                if (pos) {
                        pci_read_config_word(vdev->pdev,
                                             pos + PCI_MSIX_FLAGS, &flags);

                        return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
                }
        } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
                if (pci_is_pcie(vdev->pdev))
                        return 1;
        } else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
                return 1;
        }

        return 0;
}

static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
        (*(int *)data)++;
        return 0;
}

struct vfio_pci_fill_info {
        int max;
        int cur;
        struct vfio_pci_dependent_device *devices;
};

static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
        struct vfio_pci_fill_info *fill = data;
        struct iommu_group *iommu_group;

        if (fill->cur == fill->max)
                return -EAGAIN; /* Something changed, try again */

        iommu_group = iommu_group_get(&pdev->dev);
        if (!iommu_group)
                return -EPERM; /* Cannot reset non-isolated devices */

        fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
        fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
        fill->devices[fill->cur].bus = pdev->bus->number;
        fill->devices[fill->cur].devfn = pdev->devfn;
        fill->cur++;
        iommu_group_put(iommu_group);
        return 0;
}

struct vfio_pci_group_entry {
        struct vfio_group *group;
        int id;
};

struct vfio_pci_group_info {
        int count;
        struct vfio_pci_group_entry *groups;
};

static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
        struct vfio_pci_group_info *info = data;
        struct iommu_group *group;
        int id, i;

        group = iommu_group_get(&pdev->dev);
        if (!group)
                return -EPERM;

        id = iommu_group_id(group);

        for (i = 0; i < info->count; i++)
                if (info->groups[i].id == id)
                        break;

        iommu_group_put(group);

        return (i == info->count) ? -EINVAL : 0;
}

static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
        for (; pdev; pdev = pdev->bus->self)
                if (pdev->bus == slot->bus)
                        return (pdev->slot == slot);
        return false;
}

struct vfio_pci_walk_info {
        int (*fn)(struct pci_dev *, void *data);
        void *data;
        struct pci_dev *pdev;
        bool slot;
        int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
        struct vfio_pci_walk_info *walk = data;

        if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
                walk->ret = walk->fn(pdev, walk->data);

        return walk->ret;
}

static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
                                         int (*fn)(struct pci_dev *,
                                                   void *data), void *data,
                                         bool slot)
{
        struct vfio_pci_walk_info walk = {
                .fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
        };

        pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

        return walk.ret;
}

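/*
 * Describe the mmap-able portions of the MSI-X BAR as a sparse mmap
 * capability: the page-aligned span before the vector table and the
 * span after it, so userspace never maps the pages holding the table.
 */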
static int msix_sparse_mmap_cap(struct vfio_pci_device *vdev,
                                struct vfio_info_cap *caps)
{
        struct vfio_info_cap_header *header;
        struct vfio_region_info_cap_sparse_mmap *sparse;
        size_t end, size;
        int nr_areas = 2, i = 0;

        end = pci_resource_len(vdev->pdev, vdev->msix_bar);

        /* If MSI-X table is aligned to the start or end, only one area */
        if (((vdev->msix_offset & PAGE_MASK) == 0) ||
            (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) >= end))
                nr_areas = 1;

        size = sizeof(*sparse) + (nr_areas * sizeof(*sparse->areas));

        header = vfio_info_cap_add(caps, size,
                                   VFIO_REGION_INFO_CAP_SPARSE_MMAP, 1);
        if (IS_ERR(header))
                return PTR_ERR(header);

        sparse = container_of(header,
                              struct vfio_region_info_cap_sparse_mmap, header);
        sparse->nr_areas = nr_areas;

        if (vdev->msix_offset & PAGE_MASK) {
                sparse->areas[i].offset = 0;
                sparse->areas[i].size = vdev->msix_offset & PAGE_MASK;
                i++;
        }

        if (PAGE_ALIGN(vdev->msix_offset + vdev->msix_size) < end) {
                sparse->areas[i].offset = PAGE_ALIGN(vdev->msix_offset +
                                                     vdev->msix_size);
                sparse->areas[i].size = end - sparse->areas[i].offset;
                i++;
        }

        return 0;
}

static int region_type_cap(struct vfio_pci_device *vdev,
                           struct vfio_info_cap *caps,
                           unsigned int type, unsigned int subtype)
{
        struct vfio_info_cap_header *header;
        struct vfio_region_info_cap_type *cap;

        header = vfio_info_cap_add(caps, sizeof(*cap),
                                   VFIO_REGION_INFO_CAP_TYPE, 1);
        if (IS_ERR(header))
                return PTR_ERR(header);

        cap = container_of(header, struct vfio_region_info_cap_type, header);
        cap->type = type;
        cap->subtype = subtype;

        return 0;
}

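/*
 * Append a device-specific region (e.g. the Intel IGD OpRegion) to the
 * array exposed after the fixed config/BAR/ROM/VGA region indexes.
 */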
int vfio_pci_register_dev_region(struct vfio_pci_device *vdev,
                                 unsigned int type, unsigned int subtype,
                                 const struct vfio_pci_regops *ops,
                                 size_t size, u32 flags, void *data)
{
        struct vfio_pci_region *region;

        region = krealloc(vdev->region,
                          (vdev->num_regions + 1) * sizeof(*region),
                          GFP_KERNEL);
        if (!region)
                return -ENOMEM;

        vdev->region = region;
        vdev->region[vdev->num_regions].type = type;
        vdev->region[vdev->num_regions].subtype = subtype;
        vdev->region[vdev->num_regions].ops = ops;
        vdev->region[vdev->num_regions].size = size;
        vdev->region[vdev->num_regions].flags = flags;
        vdev->region[vdev->num_regions].data = data;

        vdev->num_regions++;

        return 0;
}

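/*
 * Device ioctl dispatcher.  A minimal userspace sketch of the query
 * side, assuming device_fd was obtained via VFIO_GROUP_GET_DEVICE_FD:
 *
 *        struct vfio_device_info info = { .argsz = sizeof(info) };
 *
 *        if (!ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info))
 *                printf("regions %u irqs %u\n", info.num_regions,
 *                       info.num_irqs);
 */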
static long vfio_pci_ioctl(void *device_data,
                           unsigned int cmd, unsigned long arg)
{
        struct vfio_pci_device *vdev = device_data;
        unsigned long minsz;

        if (cmd == VFIO_DEVICE_GET_INFO) {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                info.flags = VFIO_DEVICE_FLAGS_PCI;

                if (vdev->reset_works)
                        info.flags |= VFIO_DEVICE_FLAGS_RESET;

                info.num_regions = VFIO_PCI_NUM_REGIONS + vdev->num_regions;
                info.num_irqs = VFIO_PCI_NUM_IRQS;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct pci_dev *pdev = vdev->pdev;
                struct vfio_region_info info;
                struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
                int i, ret;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                switch (info.index) {
                case VFIO_PCI_CONFIG_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = pdev->cfg_size;
                        info.flags = VFIO_REGION_INFO_FLAG_READ |
                                     VFIO_REGION_INFO_FLAG_WRITE;
                        break;
                case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = pci_resource_len(pdev, info.index);
                        if (!info.size) {
                                info.flags = 0;
                                break;
                        }

                        info.flags = VFIO_REGION_INFO_FLAG_READ |
                                     VFIO_REGION_INFO_FLAG_WRITE;
                        if (vdev->bar_mmap_supported[info.index]) {
                                info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
                                if (info.index == vdev->msix_bar) {
                                        ret = msix_sparse_mmap_cap(vdev, &caps);
                                        if (ret)
                                                return ret;
                                }
                        }

                        break;
                case VFIO_PCI_ROM_REGION_INDEX:
                {
                        void __iomem *io;
                        size_t size;

                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.flags = 0;

                        /* Report the BAR size, not the ROM size */
                        info.size = pci_resource_len(pdev, info.index);
                        if (!info.size) {
                                /* Shadow ROMs appear as PCI option ROMs */
                                if (pdev->resource[PCI_ROM_RESOURCE].flags &
                                    IORESOURCE_ROM_SHADOW)
                                        info.size = 0x20000;
                                else
                                        break;
                        }

                        /* Is it really there? */
                        io = pci_map_rom(pdev, &size);
                        if (!io || !size) {
                                info.size = 0;
                                break;
                        }
                        pci_unmap_rom(pdev, io);

                        info.flags = VFIO_REGION_INFO_FLAG_READ;
                        break;
                }
                case VFIO_PCI_VGA_REGION_INDEX:
                        if (!vdev->has_vga)
                                return -EINVAL;

                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = 0xc0000;
                        info.flags = VFIO_REGION_INFO_FLAG_READ |
                                     VFIO_REGION_INFO_FLAG_WRITE;

                        break;
                default:
                        if (info.index >=
                            VFIO_PCI_NUM_REGIONS + vdev->num_regions)
                                return -EINVAL;

                        i = info.index - VFIO_PCI_NUM_REGIONS;

                        info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
                        info.size = vdev->region[i].size;
                        info.flags = vdev->region[i].flags;

                        ret = region_type_cap(vdev, &caps,
                                              vdev->region[i].type,
                                              vdev->region[i].subtype);
                        if (ret)
                                return ret;
                }

                if (caps.size) {
                        info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
                        if (info.argsz < sizeof(info) + caps.size) {
                                info.argsz = sizeof(info) + caps.size;
                                info.cap_offset = 0;
                        } else {
                                vfio_info_cap_shift(&caps, sizeof(info));
                                if (copy_to_user((void __user *)arg +
                                                 sizeof(info), caps.buf,
                                                 caps.size)) {
                                        kfree(caps.buf);
                                        return -EFAULT;
                                }
                                info.cap_offset = sizeof(info);
                        }

                        kfree(caps.buf);
                }

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
                        return -EINVAL;

                switch (info.index) {
                case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
                case VFIO_PCI_REQ_IRQ_INDEX:
                        break;
                case VFIO_PCI_ERR_IRQ_INDEX:
                        if (pci_is_pcie(vdev->pdev))
                                break;
                /* fall through to return error */
                default:
                        return -EINVAL;
                }

                info.flags = VFIO_IRQ_INFO_EVENTFD;

                info.count = vfio_pci_get_irq_count(vdev, info.index);

                if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
                        info.flags |= (VFIO_IRQ_INFO_MASKABLE |
                                       VFIO_IRQ_INFO_AUTOMASKED);
                else
                        info.flags |= VFIO_IRQ_INFO_NORESIZE;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_SET_IRQS) {
                struct vfio_irq_set hdr;
                u8 *data = NULL;
                int ret = 0;

                minsz = offsetofend(struct vfio_irq_set, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
                    hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
                                  VFIO_IRQ_SET_ACTION_TYPE_MASK))
                        return -EINVAL;

                if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
                        size_t size;
                        int max = vfio_pci_get_irq_count(vdev, hdr.index);

                        if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
                                size = sizeof(uint8_t);
                        else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
                                size = sizeof(int32_t);
                        else
                                return -EINVAL;

                        if (hdr.argsz - minsz < hdr.count * size ||
                            hdr.start >= max || hdr.start + hdr.count > max)
                                return -EINVAL;

                        data = memdup_user((void __user *)(arg + minsz),
                                           hdr.count * size);
                        if (IS_ERR(data))
                                return PTR_ERR(data);
                }

                mutex_lock(&vdev->igate);

                ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
                                              hdr.start, hdr.count, data);

                mutex_unlock(&vdev->igate);
                kfree(data);

                return ret;

        } else if (cmd == VFIO_DEVICE_RESET) {
                return vdev->reset_works ?
                        pci_try_reset_function(vdev->pdev) : -EINVAL;

        } else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
                struct vfio_pci_hot_reset_info hdr;
                struct vfio_pci_fill_info fill = { 0 };
                struct vfio_pci_dependent_device *devices = NULL;
                bool slot = false;
                int ret = 0;

                minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                if (hdr.argsz < minsz)
                        return -EINVAL;

                hdr.flags = 0;

                /* Can we do a slot or bus reset or neither? */
                if (!pci_probe_reset_slot(vdev->pdev->slot))
                        slot = true;
                else if (pci_probe_reset_bus(vdev->pdev->bus))
                        return -ENODEV;

                /* How many devices are affected? */
                ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
                                                    vfio_pci_count_devs,
                                                    &fill.max, slot);
                if (ret)
                        return ret;

                WARN_ON(!fill.max); /* Should always be at least one */

                /*
                 * If there's enough space, fill it now, otherwise return
                 * -ENOSPC and the number of devices affected.
                 */
                if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
                        ret = -ENOSPC;
                        hdr.count = fill.max;
                        goto reset_info_exit;
                }

                devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
                if (!devices)
                        return -ENOMEM;

                fill.devices = devices;

                ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
                                                    vfio_pci_fill_devs,
                                                    &fill, slot);

                /*
                 * If a device was removed between counting and filling,
                 * we may come up short of fill.max.  If a device was
                 * added, we'll have a return of -EAGAIN above.
                 */
                if (!ret)
                        hdr.count = fill.cur;

reset_info_exit:
                if (copy_to_user((void __user *)arg, &hdr, minsz))
                        ret = -EFAULT;

                if (!ret) {
                        if (copy_to_user((void __user *)(arg + minsz), devices,
                                         hdr.count * sizeof(*devices)))
                                ret = -EFAULT;
                }

                kfree(devices);
                return ret;

        } else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
                struct vfio_pci_hot_reset hdr;
                int32_t *group_fds;
                struct vfio_pci_group_entry *groups;
                struct vfio_pci_group_info info;
                bool slot = false;
                int i, count = 0, ret = 0;

                minsz = offsetofend(struct vfio_pci_hot_reset, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                if (hdr.argsz < minsz || hdr.flags)
                        return -EINVAL;

                /* Can we do a slot or bus reset or neither? */
                if (!pci_probe_reset_slot(vdev->pdev->slot))
                        slot = true;
                else if (pci_probe_reset_bus(vdev->pdev->bus))
                        return -ENODEV;

                /*
                 * We can't let userspace give us an arbitrarily large
                 * buffer to copy, so verify how many we think there
                 * could be.  Note groups can have multiple devices so
                 * one group per device is the max.
                 */
                ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
                                                    vfio_pci_count_devs,
                                                    &count, slot);
                if (ret)
                        return ret;

                /* Somewhere between 1 and count is OK */
                if (!hdr.count || hdr.count > count)
                        return -EINVAL;

                group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
                groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
                if (!group_fds || !groups) {
                        kfree(group_fds);
                        kfree(groups);
                        return -ENOMEM;
                }

                if (copy_from_user(group_fds, (void __user *)(arg + minsz),
                                   hdr.count * sizeof(*group_fds))) {
                        kfree(group_fds);
                        kfree(groups);
                        return -EFAULT;
                }

                /*
                 * For each group_fd, get the group through the vfio external
                 * user interface and store the group and iommu ID.  This
                 * ensures the group is held across the reset.
                 */
                for (i = 0; i < hdr.count; i++) {
                        struct vfio_group *group;
                        struct fd f = fdget(group_fds[i]);
                        if (!f.file) {
                                ret = -EBADF;
                                break;
                        }

                        group = vfio_group_get_external_user(f.file);
                        fdput(f);
                        if (IS_ERR(group)) {
                                ret = PTR_ERR(group);
                                break;
                        }

                        groups[i].group = group;
                        groups[i].id = vfio_external_user_iommu_id(group);
                }

                kfree(group_fds);

                /* release reference to groups on error */
                if (ret)
                        goto hot_reset_release;

                info.count = hdr.count;
                info.groups = groups;

                /*
                 * Test whether all the affected devices are contained
                 * by the set of groups provided by the user.
                 */
                ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
                                                    vfio_pci_validate_devs,
                                                    &info, slot);
                if (!ret)
                        /* User has access, do the reset */
                        ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
                                     pci_try_reset_bus(vdev->pdev->bus);

hot_reset_release:
                for (i--; i >= 0; i--)
                        vfio_group_put_external_user(groups[i].group);

                kfree(groups);
                return ret;
        }

        return -ENOTTY;
}

static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
                           size_t count, loff_t *ppos, bool iswrite)
{
        unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
        struct vfio_pci_device *vdev = device_data;

        if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
                return -EINVAL;

        switch (index) {
        case VFIO_PCI_CONFIG_REGION_INDEX:
                return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

        case VFIO_PCI_ROM_REGION_INDEX:
                if (iswrite)
                        return -EINVAL;
                return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

        case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
                return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

        case VFIO_PCI_VGA_REGION_INDEX:
                return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
        default:
                index -= VFIO_PCI_NUM_REGIONS;
                return vdev->region[index].ops->rw(vdev, buf,
                                                   count, ppos, iswrite);
        }

        return -EINVAL;
}

static ssize_t vfio_pci_read(void *device_data, char __user *buf,
                             size_t count, loff_t *ppos)
{
        if (!count)
                return 0;

        return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
                              size_t count, loff_t *ppos)
{
        if (!count)
                return 0;

        return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

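/*
 * mmap offsets encode the region index in the high bits of the page
 * offset (see VFIO_PCI_OFFSET_SHIFT and VFIO_PCI_INDEX_TO_OFFSET above),
 * so this single entry point can serve every mmap-able BAR.
 */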
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
        struct vfio_pci_device *vdev = device_data;
        struct pci_dev *pdev = vdev->pdev;
        unsigned int index;
        u64 phys_len, req_len, pgoff, req_start;
        int ret;

        index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

        if (vma->vm_end < vma->vm_start)
                return -EINVAL;
        if ((vma->vm_flags & VM_SHARED) == 0)
                return -EINVAL;
        if (index >= VFIO_PCI_ROM_REGION_INDEX)
                return -EINVAL;
        if (!vdev->bar_mmap_supported[index])
                return -EINVAL;

        phys_len = PAGE_ALIGN(pci_resource_len(pdev, index));
        req_len = vma->vm_end - vma->vm_start;
        pgoff = vma->vm_pgoff &
                ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
        req_start = pgoff << PAGE_SHIFT;

        if (req_start + req_len > phys_len)
                return -EINVAL;

        if (index == vdev->msix_bar) {
                /*
                 * Disallow mmaps overlapping the MSI-X table; users don't
                 * get to touch this directly.  We could find somewhere
                 * else to map the overlap, but page granularity is only
                 * a recommendation, not a requirement, so the user needs
                 * to know which bits are real.  Requiring them to mmap
                 * around the table makes that clear.
                 */

                /* If neither entirely above nor below, then it overlaps */
                if (!(req_start >= vdev->msix_offset + vdev->msix_size ||
                      req_start + req_len <= vdev->msix_offset))
                        return -EINVAL;
        }

        /*
         * Even though we don't make use of the barmap for the mmap,
         * we need to request the region and the barmap tracks that.
         */
        if (!vdev->barmap[index]) {
                ret = pci_request_selected_regions(pdev,
                                                   1 << index, "vfio-pci");
                if (ret)
                        return ret;

                vdev->barmap[index] = pci_iomap(pdev, index, 0);
        }

        vma->vm_private_data = vdev;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               req_len, vma->vm_page_prot);
}

static void vfio_pci_request(void *device_data, unsigned int count)
{
        struct vfio_pci_device *vdev = device_data;

        mutex_lock(&vdev->igate);

        if (vdev->req_trigger) {
                if (!(count % 10))
                        dev_notice_ratelimited(&vdev->pdev->dev,
                                "Relaying device request to user (#%u)\n",
                                count);
                eventfd_signal(vdev->req_trigger, 1);
        } else if (count == 0) {
                dev_warn(&vdev->pdev->dev,
                        "No device request channel registered, blocked until released by user\n");
        }

        mutex_unlock(&vdev->igate);
}

static const struct vfio_device_ops vfio_pci_ops = {
        .name           = "vfio-pci",
        .open           = vfio_pci_open,
        .release        = vfio_pci_release,
        .ioctl          = vfio_pci_ioctl,
        .read           = vfio_pci_read,
        .write          = vfio_pci_write,
        .mmap           = vfio_pci_mmap,
        .request        = vfio_pci_request,
};

static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct vfio_pci_device *vdev;
        struct iommu_group *group;
        int ret;

        if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
                return -EINVAL;

        group = vfio_iommu_group_get(&pdev->dev);
        if (!group)
                return -EINVAL;

        vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
        if (!vdev) {
                vfio_iommu_group_put(group, &pdev->dev);
                return -ENOMEM;
        }

        vdev->pdev = pdev;
        vdev->irq_type = VFIO_PCI_NUM_IRQS;
        mutex_init(&vdev->igate);
        spin_lock_init(&vdev->irqlock);

        ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
        if (ret) {
                vfio_iommu_group_put(group, &pdev->dev);
                kfree(vdev);
                return ret;
        }

        if (vfio_pci_is_vga(pdev)) {
                vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
                vga_set_legacy_decoding(pdev,
                                        vfio_pci_set_vga_decode(vdev, false));
        }

        if (!disable_idle_d3) {
                /*
                 * pci-core sets the device power state to an unknown value at
                 * bootup and after being removed from a driver.  The only
                 * transition it allows from this unknown state is to D0, which
                 * typically happens when a driver calls pci_enable_device().
                 * We're not ready to enable the device yet, but we do want to
                 * be able to get to D3.  Therefore first do a D0 transition
                 * before going to D3.
                 */
                pci_set_power_state(pdev, PCI_D0);
                pci_set_power_state(pdev, PCI_D3hot);
        }

        return ret;
}

static void vfio_pci_remove(struct pci_dev *pdev)
{
        struct vfio_pci_device *vdev;

        vdev = vfio_del_group_dev(&pdev->dev);
        if (!vdev)
                return;

        vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
        kfree(vdev->region);
        kfree(vdev);

        if (vfio_pci_is_vga(pdev)) {
                vga_client_register(pdev, NULL, NULL, NULL);
                vga_set_legacy_decoding(pdev,
                                VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
                                VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
        }

        if (!disable_idle_d3)
                pci_set_power_state(pdev, PCI_D0);
}

static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
                                                  pci_channel_state_t state)
{
        struct vfio_pci_device *vdev;
        struct vfio_device *device;

        device = vfio_device_get_from_dev(&pdev->dev);
        if (device == NULL)
                return PCI_ERS_RESULT_DISCONNECT;

        vdev = vfio_device_data(device);
        if (vdev == NULL) {
                vfio_device_put(device);
                return PCI_ERS_RESULT_DISCONNECT;
        }

        mutex_lock(&vdev->igate);

        if (vdev->err_trigger)
                eventfd_signal(vdev->err_trigger, 1);

        mutex_unlock(&vdev->igate);

        vfio_device_put(device);

        return PCI_ERS_RESULT_CAN_RECOVER;
}

static const struct pci_error_handlers vfio_err_handlers = {
        .error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
        .name           = "vfio-pci",
        .id_table       = NULL, /* only dynamic ids */
        .probe          = vfio_pci_probe,
        .remove         = vfio_pci_remove,
        .err_handler    = &vfio_err_handlers,
};

struct vfio_devices {
        struct vfio_device **devices;
        int cur_index;
        int max_index;
};

static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
        struct vfio_devices *devs = data;
        struct vfio_device *device;

        if (devs->cur_index == devs->max_index)
                return -ENOSPC;

        device = vfio_device_get_from_dev(&pdev->dev);
        if (!device)
                return -EINVAL;

        if (pci_dev_driver(pdev) != &vfio_pci_driver) {
                vfio_device_put(device);
                return -EBUSY;
        }

        devs->devices[devs->cur_index++] = device;
        return 0;
}

/*
 * Attempt to do a bus/slot reset if there are devices affected by a reset for
 * this device that are needs_reset and all of the affected devices are unused
 * (!refcnt).  Callers are required to hold driver_lock when calling this to
 * prevent device opens and concurrent bus reset attempts.  We prevent device
 * unbinds by acquiring and holding a reference to the vfio_device.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport.  Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
        struct vfio_devices devs = { .cur_index = 0 };
        int i = 0, ret = -EINVAL;
        bool needs_reset = false, slot = false;
        struct vfio_pci_device *tmp;

        if (!pci_probe_reset_slot(vdev->pdev->slot))
                slot = true;
        else if (pci_probe_reset_bus(vdev->pdev->bus))
                return;

        if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
                                          &i, slot) || !i)
                return;

        devs.max_index = i;
        devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
        if (!devs.devices)
                return;

        if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
                                          vfio_pci_get_devs, &devs, slot))
                goto put_devs;

        for (i = 0; i < devs.cur_index; i++) {
                tmp = vfio_device_data(devs.devices[i]);
                if (tmp->needs_reset)
                        needs_reset = true;
                if (tmp->refcnt)
                        goto put_devs;
        }

        if (needs_reset)
                ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
                             pci_try_reset_bus(vdev->pdev->bus);

put_devs:
        for (i = 0; i < devs.cur_index; i++) {
                tmp = vfio_device_data(devs.devices[i]);
                if (!ret)
                        tmp->needs_reset = false;

                if (!tmp->refcnt && !disable_idle_d3)
                        pci_set_power_state(tmp->pdev, PCI_D3hot);

                vfio_device_put(devs.devices[i]);
        }

        kfree(devs.devices);
}

static void __exit vfio_pci_cleanup(void)
{
        pci_unregister_driver(&vfio_pci_driver);
        vfio_pci_uninit_perm_bits();
}

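/*
 * Parse the ids= module parameter and register each entry as a dynamic
 * PCI ID.  The same binding can be set up at runtime through the driver
 * core's dynamic ID interface, for example:
 *
 *        echo 10de 13ba > /sys/bus/pci/drivers/vfio-pci/new_id
 *
 * (the vendor/device values are placeholders for illustration).
 */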
static void __init vfio_pci_fill_ids(void)
{
        char *p, *id;
        int rc;

        /* Nothing to do if no ids were specified */
        if (ids[0] == '\0')
                return;

        /* Add ids specified in the module parameter */
        p = ids;
        while ((id = strsep(&p, ","))) {
                unsigned int vendor, device, subvendor = PCI_ANY_ID,
                        subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
                int fields;

                if (!strlen(id))
                        continue;

                fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
                                &vendor, &device, &subvendor, &subdevice,
                                &class, &class_mask);

                if (fields < 2) {
                        pr_warn("invalid id string \"%s\"\n", id);
                        continue;
                }

                rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
                                   subvendor, subdevice, class, class_mask, 0);
                if (rc)
                        pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
                                vendor, device, subvendor, subdevice,
                                class, class_mask, rc);
                else
                        pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
                                vendor, device, subvendor, subdevice,
                                class, class_mask);
        }
}

static int __init vfio_pci_init(void)
{
        int ret;

        /* Allocate shared config space permission data used by all devices */
        ret = vfio_pci_init_perm_bits();
        if (ret)
                return ret;

        /* Register and scan for devices */
        ret = pci_register_driver(&vfio_pci_driver);
        if (ret)
                goto out_driver;

        vfio_pci_fill_ids();

        return 0;

out_driver:
        vfio_pci_uninit_perm_bits();
        return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);