/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

static struct vfio {
	struct class			*class;
	struct list_head		iommu_drivers_list;
	struct mutex			iommu_drivers_lock;
	struct list_head		group_list;
	struct idr			group_idr;
	struct mutex			group_lock;
	struct cdev			group_cdev;
	dev_t				group_devt;
	wait_queue_head_t		release_q;
} vfio;

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct rw_semaphore		group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
};

struct vfio_unbound_dev {
	struct device			*dev;
	struct list_head		unbound_next;
};

struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct device			*dev;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
	struct list_head		unbound_list;
	struct mutex			unbound_lock;
	atomic_t			opened;
};

struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};

/**
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);

void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);

/**
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);

/**
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * it's freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}

static void vfio_group_unlock_and_free(struct vfio_group *group)
{
	mutex_unlock(&vfio.group_lock);
	/*
	 * Unregister outside of lock.  A spurious callback is harmless now
	 * that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}

/**
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	INIT_LIST_HEAD(&group->unbound_list);
	mutex_init(&group->unbound_lock);
	atomic_set(&group->container_users, 0);
	atomic_set(&group->opened, 0);
	group->iommu_group = iommu_group;

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * blocking notifiers acquire a rwsem around registering and hold
	 * it around callback.  Therefore, need to register outside of
	 * vfio.group_lock to avoid A-B/B-A contention.  Our callback won't
	 * do anything unless it can find the group in vfio.group_list, so
	 * no harm in registering early.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_group_unlock_and_free(group);
			return tmp;
		}
	}

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	dev = device_create(vfio.class, NULL,
			    MKDEV(MAJOR(vfio.group_devt), minor),
			    group, "%d", iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		vfio_group_unlock_and_free(group);
		return (struct vfio_group *)dev; /* ERR_PTR */
	}

	group->minor = minor;
	group->dev = dev;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}

/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
	struct vfio_unbound_dev *unbound, *tmp;
	struct iommu_group *iommu_group = group->iommu_group;

	WARN_ON(!list_empty(&group->device_list));

	list_for_each_entry_safe(unbound, tmp,
				 &group->unbound_list, unbound_next) {
		list_del(&unbound->unbound_next);
		kfree(unbound);
	}

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);
	vfio_group_unlock_and_free(group);
	iommu_group_put(iommu_group);
}

static void vfio_group_put(struct vfio_group *group)
{
	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}

/*
 * Not really a try as we will sleep for mutex, but we need to make
 * sure the group pointer is valid under lock and get a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

/**
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;
	dev_set_drvdata(dev, device);

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	mutex_unlock(&group->device_lock);

	return device;
}

static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	list_del(&device->group_next);
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_device_put);

static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);
	return NULL;
}

/*
 * Some drivers, like pci-stub, are only used to prevent other drivers from
 * claiming a device and are therefore perfectly legitimate for a user owned
 * group.  The pci-stub driver has no dependencies on DMA or the IOVA mapping
 * of the device, but it does prevent the user from having direct access to
 * the device, which is useful in some circumstances.
 *
 * We also assume that we can include PCI interconnect devices, ie. bridges.
 * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
 * then all of the downstream devices will be part of the same IOMMU group as
 * the bridge.  Thus, if placing the bridge into the user owned IOVA space
 * breaks anything, it only does so for user owned devices downstream.  Note
 * that error notification via MSI can be affected for platforms that handle
 * MSI within the same IOVA space as DMA.
 */
static const char * const vfio_driver_whitelist[] = { "pci-stub" };

static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
{
	int i;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
			return true;
	}

	for (i = 0; i < ARRAY_SIZE(vfio_driver_whitelist); i++) {
		if (!strcmp(drv->name, vfio_driver_whitelist[i]))
			return true;
	}

	return false;
}

/*
 * A vfio group is viable for use by userspace if all devices are in
 * one of the following states:
 *  - driver-less
 *  - bound to a vfio driver
 *  - bound to a whitelisted driver
 *  - a PCI interconnect device
 *
 * We use two methods to determine whether a device is bound to a vfio
 * driver.  The first is to test whether the device exists in the vfio
 * group.  The second is to test if the device exists on the group
 * unbound_list, indicating it's in the middle of transitioning from
 * a vfio driver to driver-less.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct device_driver *drv = ACCESS_ONCE(dev->driver);
	struct vfio_unbound_dev *unbound;
	int ret = -EINVAL;

	mutex_lock(&group->unbound_lock);
	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
		if (dev == unbound->dev) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&group->unbound_lock);

	if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return ret;
}

/**
 * Async device support
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/* Do we already know about it?  We shouldn't */
	device = vfio_group_get_device(group, dev);
	if (WARN_ON_ONCE(device)) {
		vfio_device_put(device);
		return 0;
	}

	/* Nothing to do for idle groups */
	if (!atomic_read(&group->container_users))
		return 0;

	/* TODO Prevent device auto probing */
	WARN("Device %s added to live group %d!\n", dev_name(dev),
	     iommu_group_id(group->iommu_group));

	return 0;
}

static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
	/* We don't care what happens when the group isn't in use */
	if (!atomic_read(&group->container_users))
		return 0;

	return vfio_dev_viable(dev, group);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
	struct device *dev = data;
	struct vfio_unbound_dev *unbound;

	/*
	 * Need to go through a group_lock lookup to get a reference or we
	 * risk racing a group being removed.  Ignore spurious notifies.
	 */
	group = vfio_group_try_get(group);
	if (!group)
		return NOTIFY_OK;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		vfio_group_nb_add_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		/*
		 * Nothing to do here.  If the device is in use, then the
		 * vfio sub-driver should block the remove callback until
		 * it is unused.  If the device is unused or attached to a
		 * stub driver, then it should be released and we don't
		 * care that it will be going away.
		 */
		break;
	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
		pr_debug("%s: Device %s, group %d binding to driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		break;
	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
		pr_debug("%s: Device %s, group %d bound to driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		BUG_ON(vfio_group_nb_verify(group, dev));
		break;
	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
		pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		break;
	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
		pr_debug("%s: Device %s, group %d unbound from driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		/*
		 * XXX An unbound device in a live group is ok, but we'd
		 * really like to avoid the above BUG_ON by preventing other
		 * drivers from binding to it.  Once that occurs, we have to
		 * stop the system to maintain isolation.  At a minimum, we'd
		 * want a toggle to disable driver auto probe for this device.
		 */

		mutex_lock(&group->unbound_lock);
		list_for_each_entry(unbound,
				    &group->unbound_list, unbound_next) {
			if (dev == unbound->dev) {
				list_del(&unbound->unbound_next);
				kfree(unbound);
				break;
			}
		}
		mutex_unlock(&group->unbound_lock);
		break;
	}

	vfio_group_put(group);
	return NOTIFY_OK;
}

/**
 * VFIO driver API
 */
int vfio_add_group_dev(struct device *dev,
		       const struct vfio_device_ops *ops, void *device_data)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return -EINVAL;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		group = vfio_create_group(iommu_group);
		if (IS_ERR(group)) {
			iommu_group_put(iommu_group);
			return PTR_ERR(group);
		}
	} else {
		/*
		 * A found vfio_group already holds a reference to the
		 * iommu_group.  A created vfio_group keeps the reference.
		 */
		iommu_group_put(iommu_group);
	}

	device = vfio_group_get_device(group, dev);
	if (device) {
		WARN(1, "Device %s already exists on group %d\n",
		     dev_name(dev), iommu_group_id(iommu_group));
		vfio_device_put(device);
		vfio_group_put(group);
		return -EBUSY;
	}

	device = vfio_group_create_device(group, dev, ops, device_data);
	if (IS_ERR(device)) {
		vfio_group_put(group);
		return PTR_ERR(device);
	}

	/*
	 * Drop all but the vfio_device reference.  The vfio_device holds
	 * a reference to the vfio_group, which holds a reference to the
	 * iommu_group.
	 */
	vfio_group_put(group);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);

/**
 * Get a reference to the vfio_device for a device.  Even if the
 * caller thinks they own the device, they could be racing with a
 * release call path, so we can't trust drvdata for the shortcut.
 * Go the long way around, from the iommu_group to the vfio_group
 * to the vfio_device.
 */
struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return NULL;

	group = vfio_group_get_from_iommu(iommu_group);
	iommu_group_put(iommu_group);
	if (!group)
		return NULL;

	device = vfio_group_get_device(group, dev);
	vfio_group_put(group);

	return device;
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);

static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
						     char *buf)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (!strcmp(dev_name(device->dev), buf)) {
			vfio_device_get(device);
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return device;
}

/*
 * Caller must hold a reference to the vfio_device
 */
void *vfio_device_data(struct vfio_device *device)
{
	return device->device_data;
}
EXPORT_SYMBOL_GPL(vfio_device_data);

/* Given a referenced group, check if it contains the device */
static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	device = vfio_group_get_device(group, dev);
	if (!device)
		return false;

	vfio_device_put(device);
	return true;
}

/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device... */
void *vfio_del_group_dev(struct device *dev)
{
	struct vfio_device *device = dev_get_drvdata(dev);
	struct vfio_group *group = device->group;
	void *device_data = device->device_data;
	struct vfio_unbound_dev *unbound;
	unsigned int i = 0;
	long ret;
	bool interrupted = false;

	/*
	 * The group exists so long as we have a device reference.  Get
	 * a group reference and use it to scan for the device going away.
	 */
	vfio_group_get(group);

	/*
	 * When the device is removed from the group, the group suddenly
	 * becomes non-viable; the device has a driver (until the unbind
	 * completes), but it's not present in the group.  This is bad news
	 * for any external users that need to re-acquire a group reference
	 * in order to match and release their existing reference.  To
	 * solve this, we track such devices on the unbound_list to bridge
	 * the gap until they're fully unbound.
	 */
	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
	if (unbound) {
		unbound->dev = dev;
		mutex_lock(&group->unbound_lock);
		list_add(&unbound->unbound_next, &group->unbound_list);
		mutex_unlock(&group->unbound_lock);
	}
	WARN_ON(!unbound);

	vfio_device_put(device);

	/*
	 * If the device is still present in the group after the above
	 * 'put', then it is in use and we need to request it from the
	 * bus driver.  The driver may in turn need to request the
	 * device from the user.  We send the request on an arbitrary
	 * interval with counter to allow the driver to take escalating
	 * measures to release the device if it has the ability to do so.
	 */
	do {
		device = vfio_group_get_device(group, dev);
		if (!device)
			break;

		if (device->ops->request)
			device->ops->request(device_data, i++);

		vfio_device_put(device);

		if (interrupted) {
			ret = wait_event_timeout(vfio.release_q,
					!vfio_dev_present(group, dev), HZ * 10);
		} else {
			ret = wait_event_interruptible_timeout(vfio.release_q,
					!vfio_dev_present(group, dev), HZ * 10);
			if (ret == -ERESTARTSYS) {
				interrupted = true;
				dev_warn(dev,
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}
	} while (ret <= 0);

	vfio_group_put(group);

	return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);

/**
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
				       unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = 0;

	down_read(&container->group_lock);

	driver = container->iommu_driver;

	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no driver is set, poll all registered drivers for
		 * extensions and return the first positive result.  If
		 * a driver is already set, further queries will be passed
		 * only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {
				if (!try_module_get(driver->ops->owner))
					continue;

				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}

	up_read(&container->group_lock);

	return ret;
}

/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}

static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;

	down_write(&container->group_lock);

	/*
	 * The container is designed to be an unprivileged interface while
	 * the group can be assigned to specific users.  Therefore, only by
	 * adding a group to a container does the user get the privilege of
	 * enabling the iommu, which may allocate finite resources.  There
	 * is no unset_iommu, but by removing all the groups from a container,
	 * the container is deprivileged and returns to an unset state.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		up_write(&container->group_lock);
		return -EINVAL;
	}

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

		if (!try_module_get(driver->ops->owner))
			continue;

		/*
		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
		 * so test which iommu driver reported support for this
		 * extension and call open on them.  We also pass them the
		 * magic, allowing a single driver to support multiple
		 * interfaces if they'd like.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		/* module reference holds the driver we're working on */
		mutex_unlock(&vfio.iommu_drivers_lock);

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			goto skip_drivers_unlock;
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (!ret) {
			container->iommu_driver = driver;
			container->iommu_data = data;
		} else {
			driver->ops->release(data);
			module_put(driver->ops->owner);
		}

		goto skip_drivers_unlock;
	}

	mutex_unlock(&vfio.iommu_drivers_lock);
skip_drivers_unlock:
	up_write(&container->group_lock);

	return ret;
}

static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	void *data;
	long ret = -EINVAL;

	if (!container)
		return ret;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		ret = VFIO_API_VERSION;
		break;
	case VFIO_CHECK_EXTENSION:
		ret = vfio_ioctl_check_extension(container, arg);
		break;
	case VFIO_SET_IOMMU:
		ret = vfio_ioctl_set_iommu(container, arg);
		break;
	default:
		down_read(&container->group_lock);

		driver = container->iommu_driver;
		data = container->iommu_data;

		if (driver) /* passthrough all unrecognized ioctls */
			ret = driver->ops->ioctl(data, cmd, arg);

		up_read(&container->group_lock);
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return -ENOMEM;

	INIT_LIST_HEAD(&container->group_list);
	init_rwsem(&container->group_lock);
	kref_init(&container->kref);

	filep->private_data = container;

	return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;

	filep->private_data = NULL;

	vfio_container_put(container);

	return 0;
}

/*
 * Once an iommu driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->read))
		ret = driver->ops->read(container->iommu_data,
					buf, count, ppos);

	up_read(&container->group_lock);

	return ret;
}

static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->write))
		ret = driver->ops->write(container->iommu_data,
					 buf, count, ppos);

	up_read(&container->group_lock);

	return ret;
}

static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	int ret = -EINVAL;

	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->mmap))
		ret = driver->ops->mmap(container->iommu_data, vma);

	up_read(&container->group_lock);

	return ret;
}

static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.read		= vfio_fops_read,
	.write		= vfio_fops_write,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_fops_compat_ioctl,
#endif
	.mmap		= vfio_fops_mmap,
};

/**
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);

	group->container = NULL;
	list_del(&group->container_next);

	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}

	up_write(&container->group_lock);

	vfio_container_put(container);
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know that still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;
	if (users != 1)
		return -EBUSY;

	__vfio_group_unset_container(group);

	return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container.  That is, if the
 * group file descriptor is closed, as well as any device file descriptors,
 * the group is free.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}

static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
	struct fd f;
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (atomic_read(&group->container_users))
		return -EINVAL;

	f = fdget(container_fd);
	if (!f.file)
		return -EBADF;

	/* Sanity check, is this really our fd? */
	if (f.file->f_op != &vfio_fops) {
		fdput(f);
		return -EINVAL;
	}

	container = f.file->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group);
		if (ret)
			goto unlock_out;
	}

	group->container = container;
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
	atomic_inc(&group->container_users);

unlock_out:
	up_write(&container->group_lock);
	fdput(f);
	return ret;
}

static bool vfio_group_viable(struct vfio_group *group)
{
	return (iommu_group_for_each_dev(group->iommu_group,
					 group, vfio_dev_viable) == 0);
}

static const struct file_operations vfio_device_fops;

static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret;

	if (0 == atomic_read(&group->container_users) ||
	    !group->container->iommu_driver || !vfio_group_viable(group))
		return -EINVAL;

	device = vfio_device_get_from_name(group, buf);
	if (!device)
		return -ENODEV;

	ret = device->ops->open(device->device_data);
	if (ret) {
		vfio_device_put(device);
		return ret;
	}

	/*
	 * We can't use anon_inode_getfd() because we need to modify
	 * the f_mode flags directly to allow more than just ioctls
	 */
	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0) {
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
				   device, O_RDWR);
	if (IS_ERR(filep)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filep);
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	/*
	 * TODO: add an anon_inode interface to do this.
	 * Appears to be missing by lack of need rather than
	 * explicitly prevented.  Now there's need.
	 */
	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

	atomic_inc(&group->container_users);

	fd_install(ret, filep);

	return ret;
}

static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case VFIO_GROUP_GET_STATUS:
	{
		struct vfio_group_status status;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_group_status, flags);

		if (copy_from_user(&status, (void __user *)arg, minsz))
			return -EFAULT;

		if (status.argsz < minsz)
			return -EINVAL;

		status.flags = 0;

		if (vfio_group_viable(group))
			status.flags |= VFIO_GROUP_FLAGS_VIABLE;

		if (group->container)
			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

		if (copy_to_user((void __user *)arg, &status, minsz))
			return -EFAULT;

		ret = 0;
		break;
	}
	case VFIO_GROUP_SET_CONTAINER:
	{
		int fd;

		if (get_user(fd, (int __user *)arg))
			return -EFAULT;

		if (fd < 0)
			return -EINVAL;

		ret = vfio_group_set_container(group, fd);
		break;
	}
	case VFIO_GROUP_UNSET_CONTAINER:
		ret = vfio_group_unset_container(group);
		break;
	case VFIO_GROUP_GET_DEVICE_FD:
	{
		char *buf;

		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		ret = vfio_group_get_device_fd(group, buf);
		kfree(buf);
		break;
	}
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_group_fops_compat_ioctl(struct file *filep,
					 unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_group_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group;
	int opened;

	group = vfio_group_get_from_minor(iminor(inode));
	if (!group)
		return -ENODEV;

	/* Do we need multiple instances of the group open?  Seems not. */
	opened = atomic_cmpxchg(&group->opened, 0, 1);
	if (opened) {
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Is something still in use from a previous open? */
	if (group->container) {
		atomic_dec(&group->opened);
		vfio_group_put(group);
		return -EBUSY;
	}

	filep->private_data = group;

	return 0;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	vfio_group_try_dissolve_container(group);

	atomic_dec(&group->opened);

	vfio_group_put(group);

	return 0;
}

static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_group_fops_compat_ioctl,
#endif
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};

/**
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = filep->private_data;

	device->ops->release(device->device_data);

	vfio_group_try_dissolve_container(device->group);

	vfio_device_put(device);

	return 0;
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->ioctl))
		return -EINVAL;

	return device->ops->ioctl(device->device_data, cmd, arg);
}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device->device_data, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device->device_data, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device->device_data, vma);
}

#ifdef CONFIG_COMPAT
static long vfio_device_fops_compat_ioctl(struct file *filep,
					  unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_device_fops_unl_ioctl(filep, cmd, arg);
}
#endif	/* CONFIG_COMPAT */

static const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_device_fops_compat_ioctl,
#endif
	.mmap		= vfio_device_fops_mmap,
};

/**
 * External user API, exported by symbols to be linked dynamically.
 *
 * The protocol includes:
 *  1. do normal VFIO init operation:
 *	- opening a new container;
 *	- attaching group(s) to it;
 *	- setting an IOMMU driver for a container.
 * When IOMMU is set for a container, all groups in it are
 * considered ready to use by an external user.
 *
 * 2. User space passes a group fd to an external user.
 * The external user calls vfio_group_get_external_user()
 * to verify that:
 *	- the group is initialized;
 *	- IOMMU is set for it.
 * If both checks passed, vfio_group_get_external_user()
 * increments the container user counter to prevent
 * the VFIO group from disposal before KVM exits.
 *
 * 3. The external user calls vfio_external_user_iommu_id()
 * to know an IOMMU ID.
 *
 * 4. When the external KVM finishes, it calls
 * vfio_group_put_external_user() to release the VFIO group.
 * This call decrements the container user counter.
 */
struct vfio_group *vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	if (filep->f_op != &vfio_group_fops)
		return ERR_PTR(-EINVAL);

	if (!atomic_inc_not_zero(&group->container_users))
		return ERR_PTR(-EINVAL);

	if (!group->container->iommu_driver ||
			!vfio_group_viable(group)) {
		atomic_dec(&group->container_users);
		return ERR_PTR(-EINVAL);
	}

	vfio_group_get(group);

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);

void vfio_group_put_external_user(struct vfio_group *group)
{
	vfio_group_put(group);
	vfio_group_try_dissolve_container(group);
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);

int vfio_external_user_iommu_id(struct vfio_group *group)
{
	return iommu_group_id(group->iommu_group);
}
EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);

long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
{
	return vfio_ioctl_check_extension(group->container, arg);
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);

/**
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

static struct miscdevice vfio_dev = {
	.minor = VFIO_MINOR,
	.name = "vfio",
	.fops = &vfio_fops,
	.nodename = "vfio/vfio",
	.mode = S_IRUGO | S_IWUGO,
};

static int __init vfio_init(void)
{
	int ret;

	idr_init(&vfio.group_idr);
	mutex_init(&vfio.group_lock);
	mutex_init(&vfio.iommu_drivers_lock);
	INIT_LIST_HEAD(&vfio.group_list);
	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
	init_waitqueue_head(&vfio.release_q);

	ret = misc_register(&vfio_dev);
	if (ret) {
		pr_err("vfio: misc device register failed\n");
		return ret;
	}

	/* /dev/vfio/$GROUP */
	vfio.class = class_create(THIS_MODULE, "vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
	if (ret)
		goto err_alloc_chrdev;

	cdev_init(&vfio.group_cdev, &vfio_group_fops);
	ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
	if (ret)
		goto err_cdev_add;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");

	/*
	 * Attempt to load known iommu-drivers.  This gives us a working
	 * environment without the user needing to explicitly load iommu
	 * drivers.
	 */
	request_module_nowait("vfio_iommu_type1");
	request_module_nowait("vfio_iommu_spapr_tce");

	return 0;

err_cdev_add:
	unregister_chrdev_region(vfio.group_devt, MINORMASK);
err_alloc_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_class:
	misc_deregister(&vfio_dev);
	return ret;
}

static void __exit vfio_cleanup(void)
{
	WARN_ON(!list_empty(&vfio.group_list));

	idr_destroy(&vfio.group_idr);
	cdev_del(&vfio.group_cdev);
	unregister_chrdev_region(vfio.group_devt, MINORMASK);
	class_destroy(vfio.class);
	vfio.class = NULL;
	misc_deregister(&vfio_dev);
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");
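/*
 * Illustrative sketch (not part of this driver): the minimal userspace
 * sequence that exercises the container, group and device interfaces
 * implemented above, mirroring the protocol in Documentation/vfio.txt.
 * The group number "26" and device name "0000:06:0d.0" are assumptions
 * for illustration only; real values depend on the system and on which
 * device has been bound to a vfio bus driver.
 *
 *	int container, group, device;
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	container = open("/dev/vfio/vfio", O_RDWR);
 *
 *	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *		return -1;			// unknown API version
 *	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *		return -1;			// no type1 iommu driver
 *
 *	group = open("/dev/vfio/26", O_RDWR);	// /dev/vfio/$GROUP
 *
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		return -1;			// group not viable
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *
 *	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */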