/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>

#include "iodev.h"
/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest
 *
 * Credit goes to Avi Kivity for the original idea.
 * --------------------------------------------------------------------
 */
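/*
 * A sketch of typical userspace usage (vmfd, efd and gsi below are
 * placeholder variables, not names defined in this file): an irqfd is
 * bound with the KVM_IRQFD ioctl on a VM fd, after which any counter
 * write to the eventfd injects the configured GSI into the guest:
 *
 *	int efd = eventfd(0, 0);
 *	struct kvm_irqfd data = { .fd = efd, .gsi = gsi };
 *	ioctl(vmfd, KVM_IRQFD, &data);
 *	...
 *	uint64_t one = 1;
 *	write(efd, &one, sizeof(one));	<- injects 'gsi' into the guest
 */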
struct _irqfd {
        struct kvm               *kvm;
        struct eventfd_ctx       *eventfd;
        int                       gsi;
        struct list_head          list;
        poll_table                pt;
        wait_queue_t              wait;
        struct work_struct        inject;
        struct work_struct        shutdown;
};

static struct workqueue_struct *irqfd_cleanup_wq;
static void
irqfd_inject(struct work_struct *work)
{
        struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
        struct kvm *kvm = irqfd->kvm;
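        /*
         * Pulse the GSI: assert then immediately deassert, so an
         * edge-triggered interrupt sees one clean rising edge per eventfd
         * signal and a level-triggered line is not left stuck asserted.
         */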
        kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
        kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
}
/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
        struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
        u64 cnt;

        /*
         * Synchronize with the wait-queue and unhook ourselves to prevent
         * further events.
         */
        eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

        /*
         * We know no new events will be scheduled at this point, so block
         * until all previously outstanding events have completed.
         */
        flush_work(&irqfd->inject);

        /*
         * It is now safe to release the object's resources.
         */
        eventfd_ctx_put(irqfd->eventfd);
        kfree(irqfd);
}
/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
        return list_empty(&irqfd->list) ? false : true;
}
/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
        BUG_ON(!irqfd_is_active(irqfd));

        list_del_init(&irqfd->list);

        queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
        unsigned long flags = (unsigned long)key;

        if (flags & POLLIN)
                /* An event has been signaled, inject an interrupt */
                schedule_work(&irqfd->inject);

        if (flags & POLLHUP) {
                /* The eventfd is closing, detach from KVM */
                struct kvm *kvm = irqfd->kvm;
                unsigned long flags;

                spin_lock_irqsave(&kvm->irqfds.lock, flags);

                /*
                 * We must check if someone deactivated the irqfd before
                 * we could acquire the irqfds.lock since the item is
                 * deactivated from the KVM side before it is unhooked from
                 * the wait-queue.  If it is already deactivated, we can
                 * simply return knowing the other side will cleanup for us.
                 * We cannot race against the irqfd going away since the
                 * other side is required to acquire wqh->lock, which we hold.
                 */
                if (irqfd_is_active(irqfd))
                        irqfd_deactivate(irqfd);

                spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
        }

        return 0;
}
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
                        poll_table *pt)
{
        struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
        add_wait_queue(wqh, &irqfd->wait);
}
static int
kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
{
        struct _irqfd *irqfd, *tmp;
        struct file *file = NULL;
        struct eventfd_ctx *eventfd = NULL;
        int ret;
        unsigned int events;

        irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
        if (!irqfd)
                return -ENOMEM;

        irqfd->kvm = kvm;
        irqfd->gsi = gsi;
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);

        file = eventfd_fget(fd);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
                goto fail;
        }

        eventfd = eventfd_ctx_fileget(file);
        if (IS_ERR(eventfd)) {
                ret = PTR_ERR(eventfd);
                goto fail;
        }

        irqfd->eventfd = eventfd;

        /*
         * Install our own custom wake-up handling so we are notified via
         * a callback whenever someone signals the underlying eventfd.
         */
        init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
        init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

        spin_lock_irq(&kvm->irqfds.lock);

        ret = 0;
        list_for_each_entry(tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd != tmp->eventfd)
                        continue;
                /* This fd is used for another irq already. */
                ret = -EBUSY;
                spin_unlock_irq(&kvm->irqfds.lock);
                goto fail;
        }

        events = file->f_op->poll(file, &irqfd->pt);

        list_add_tail(&irqfd->list, &kvm->irqfds.items);
        spin_unlock_irq(&kvm->irqfds.lock);

        /*
         * Check if there was an event already pending on the eventfd
         * before we registered, and trigger it as if we didn't miss it.
         */
        if (events & POLLIN)
                schedule_work(&irqfd->inject);

        /*
         * Do not drop the file until the irqfd is fully initialized,
         * otherwise we might race against the POLLHUP.
         */
        fput(file);

        return 0;

fail:
        if (eventfd && !IS_ERR(eventfd))
                eventfd_ctx_put(eventfd);

        if (!IS_ERR(file))
                fput(file);

        kfree(irqfd);
        return ret;
}
void
kvm_eventfd_init(struct kvm *kvm)
{
        spin_lock_init(&kvm->irqfds.lock);
        INIT_LIST_HEAD(&kvm->irqfds.items);
        INIT_LIST_HEAD(&kvm->ioeventfds);
}
/*
 * shutdown any irqfd's that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
{
        struct _irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;

        eventfd = eventfd_ctx_fdget(fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd == eventfd && irqfd->gsi == gsi)
                        irqfd_deactivate(irqfd);
        }

        spin_unlock_irq(&kvm->irqfds.lock);
        eventfd_ctx_put(eventfd);

        /*
         * Block until we know all outstanding shutdown jobs have completed
         * so that we guarantee there will not be any more interrupts on this
         * gsi once this deassign function returns.
         */
        flush_workqueue(irqfd_cleanup_wq);

        return 0;
}
int
kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
        if (flags & KVM_IRQFD_FLAG_DEASSIGN)
                return kvm_irqfd_deassign(kvm, fd, gsi);

        return kvm_irqfd_assign(kvm, fd, gsi);
}
/*
 * This function is called as the kvm VM fd is being released. Shutdown all
 * irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
        struct _irqfd *irqfd, *tmp;

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
                irqfd_deactivate(irqfd);

        spin_unlock_irq(&kvm->irqfds.lock);

        /*
         * Block until we know all outstanding shutdown jobs have completed
         * since we do not take a kvm* reference.
         */
        flush_workqueue(irqfd_cleanup_wq);
}
/*
 * Create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
static int __init irqfd_module_init(void)
{
        irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
        if (!irqfd_cleanup_wq)
                return -ENOMEM;

        return 0;
}

static void __exit irqfd_module_exit(void)
{
        destroy_workqueue(irqfd_cleanup_wq);
}

module_init(irqfd_module_init);
module_exit(irqfd_module_exit);
/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
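/*
 * A sketch of typical userspace usage (vmfd, efd and the doorbell address
 * below are placeholders, not names defined here): once registered with the
 * KVM_IOEVENTFD ioctl, a matching guest write signals the eventfd in-kernel
 * instead of exiting to userspace:
 *
 *	int efd = eventfd(0, 0);
 *	struct kvm_ioeventfd data = {
 *		.addr  = 0xc000,	(hypothetical PIO doorbell address)
 *		.len   = 2,
 *		.fd    = efd,
 *		.flags = KVM_IOEVENTFD_FLAG_PIO,
 *	};
 *	ioctl(vmfd, KVM_IOEVENTFD, &data);
 */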
struct _ioeventfd {
        struct list_head     list;
        u64                  addr;
        int                  length;
        struct eventfd_ctx  *eventfd;
        u64                  datamatch;
        struct kvm_io_device dev;
        bool                 wildcard;
};
static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
        return container_of(dev, struct _ioeventfd, dev);
}
static void
ioeventfd_release(struct _ioeventfd *p)
{
        eventfd_ctx_put(p->eventfd);
        list_del(&p->list);
        kfree(p);
}
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
        u64 _val;

        if (!(addr == p->addr && len == p->length))
                /* address-range must be precise for a hit */
                return false;

        if (p->wildcard)
                /* all else equal, wildcard is always a hit */
                return true;

        /* otherwise, we have to actually compare the data */

        BUG_ON(!IS_ALIGNED((unsigned long)val, len));

        switch (len) {
        case 1:
                _val = *(u8 *)val;
                break;
        case 2:
                _val = *(u16 *)val;
                break;
        case 4:
                _val = *(u32 *)val;
                break;
        case 8:
                _val = *(u64 *)val;
                break;
        default:
                return false;
        }

        return _val == p->datamatch ? true : false;
}
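/*
 * Example of the matching rules above: with addr == 0x1000, len == 2 and
 * datamatch == 0xbeef registered, only a 16-bit guest write of 0xbeef to
 * exactly 0x1000 is a hit; any other value, width or address falls through
 * so another handler (or a userspace exit) can service the access.
 */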
/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
                const void *val)
{
        struct _ioeventfd *p = to_ioeventfd(this);

        if (!ioeventfd_in_range(p, addr, len, val))
                return -EOPNOTSUPP;

        eventfd_signal(p->eventfd, 1);
        return 0;
}
/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
        struct _ioeventfd *p = to_ioeventfd(this);

        ioeventfd_release(p);
}
static const struct kvm_io_device_ops ioeventfd_ops = {
        .write      = ioeventfd_write,
        .destructor = ioeventfd_destructor,
};
/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
        struct _ioeventfd *_p;

        list_for_each_entry(_p, &kvm->ioeventfds, list)
                if (_p->addr == p->addr && _p->length == p->length &&
                    (_p->wildcard || p->wildcard ||
                     _p->datamatch == p->datamatch))
                        return true;

        return false;
}
static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        int                       pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
        enum kvm_bus              bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
        struct _ioeventfd        *p;
        struct eventfd_ctx       *eventfd;
        int                       ret;

        /* must be natural-word sized */
        switch (args->len) {
        case 1:
        case 2:
        case 4:
        case 8:
                break;
        default:
                return -EINVAL;
        }

        /* check for range overflow */
        if (args->addr + args->len < args->addr)
                return -EINVAL;

        /* check for extra flags that we don't understand */
        if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
                return -EINVAL;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_LIST_HEAD(&p->list);
        p->addr    = args->addr;
        p->length  = args->len;
        p->eventfd = eventfd;

        /* The datamatch feature is optional, otherwise this is a wildcard */
        if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
                p->datamatch = args->datamatch;
        else
                p->wildcard = true;

        mutex_lock(&kvm->slots_lock);

        /* Verify that there isn't a match already */
        if (ioeventfd_check_collision(kvm, p)) {
                ret = -EEXIST;
                goto unlock_fail;
        }

        kvm_iodevice_init(&p->dev, &ioeventfd_ops);

        ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
        if (ret < 0)
                goto unlock_fail;

        list_add_tail(&p->list, &kvm->ioeventfds);

        mutex_unlock(&kvm->slots_lock);

        return 0;

unlock_fail:
        mutex_unlock(&kvm->slots_lock);

fail:
        kfree(p);
        eventfd_ctx_put(eventfd);

        return ret;
}
static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        int                       pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
        enum kvm_bus              bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
        struct _ioeventfd        *p, *tmp;
        struct eventfd_ctx       *eventfd;
        int                       ret = -ENOENT;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&kvm->slots_lock);

        list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
                bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

                if (p->eventfd != eventfd  ||
                    p->addr != args->addr  ||
                    p->length != args->len ||
                    p->wildcard != wildcard)
                        continue;

                if (!p->wildcard && p->datamatch != args->datamatch)
                        continue;

                kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
                ioeventfd_release(p);
                ret = 0;
                break;
        }

        mutex_unlock(&kvm->slots_lock);

        eventfd_ctx_put(eventfd);

        return ret;
}
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
                return kvm_deassign_ioeventfd(kvm, args);

        return kvm_assign_ioeventfd(kvm, args);
}