/*
 * irq_comm.c: Common API for in kernel interrupt controller
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <trace/events/kvm.h>

#include <asm/msidef.h>
#ifdef CONFIG_IA64
#include <asm/iosapic.h>
#endif

#include "irq.h"

#include "ioapic.h"

static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
                           struct kvm *kvm, int irq_source_id, int level,
                           bool line_status)
{
#ifdef CONFIG_X86
        struct kvm_pic *pic = pic_irqchip(kvm);
        return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
#else
        return -1;
#endif
}

static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
                              struct kvm *kvm, int irq_source_id, int level,
                              bool line_status)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;
        return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
                                  line_status);
}

static inline bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
{
#ifdef CONFIG_IA64
        return irq->delivery_mode ==
                (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
#else
        return irq->delivery_mode == APIC_DM_LOWEST;
#endif
}

int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
                             struct kvm_lapic_irq *irq, unsigned long *dest_map)
{
        int i, r = -1;
        struct kvm_vcpu *vcpu, *lowest = NULL;

        if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
                        kvm_is_dm_lowest_prio(irq)) {
                printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
                irq->delivery_mode = APIC_DM_FIXED;
        }

        if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
                return r;
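        /*
         * Slow path: scan every vcpu.  For fixed delivery the interrupt is
         * delivered to each matching APIC and the deliveries are counted;
         * for lowest priority only the best candidate is remembered and the
         * interrupt is delivered once after the scan.
         */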
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_apic_present(vcpu))
                        continue;

                if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
                                         irq->dest_id, irq->dest_mode))
                        continue;

                if (!kvm_is_dm_lowest_prio(irq)) {
                        if (r < 0)
                                r = 0;
                        r += kvm_apic_set_irq(vcpu, irq, dest_map);
                } else if (kvm_lapic_enabled(vcpu)) {
                        if (!lowest)
                                lowest = vcpu;
                        else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
                                lowest = vcpu;
                }
        }

        if (lowest)
                r = kvm_apic_set_irq(lowest, irq, dest_map);

        return r;
}

static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
                                   struct kvm_lapic_irq *irq)
{
        trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
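        /*
         * Decode the MSI address/data pair into a local APIC interrupt:
         * the address supplies the destination ID and destination mode,
         * the data supplies the vector, trigger mode and delivery mode.
         */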
        irq->dest_id = (e->msi.address_lo &
                        MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
        irq->vector = (e->msi.data &
                       MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
        irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
        irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
        irq->delivery_mode = e->msi.data & 0x700;
        irq->level = 1;
        irq->shorthand = 0;
        /* TODO Deal with RH bit of MSI message address */
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
                struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
        struct kvm_lapic_irq irq;

        if (!level)
                return -1;

        kvm_set_msi_irq(e, &irq);

        return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
}

static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
                                struct kvm *kvm)
{
        struct kvm_lapic_irq irq;
        int r;

        kvm_set_msi_irq(e, &irq);

        if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
                return r;
        else
                return -EWOULDBLOCK;
}

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
{
        struct kvm_kernel_irq_routing_entry route;

        if (!irqchip_in_kernel(kvm) || msi->flags != 0)
                return -EINVAL;
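        /*
         * Build a transient routing entry directly from the MSI message
         * supplied by userspace and inject it, bypassing the routing table.
         */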
        route.msi.address_lo = msi->address_lo;
        route.msi.address_hi = msi->address_hi;
        route.msi.data = msi->data;

        return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
}

/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status)
{
        struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
        int ret = -1, i = 0;
        struct kvm_irq_routing_table *irq_rt;

        trace_kvm_set_irq(irq, level, irq_source_id);

        /*
         * There is no way to tell whether the guest uses the PIC or the
         * IOAPIC, so set the bit in both.  The guest will ignore writes
         * to the unused one.
         */
        rcu_read_lock();
        irq_rt = rcu_dereference(kvm->irq_routing);
        if (irq < irq_rt->nr_rt_entries)
                hlist_for_each_entry(e, &irq_rt->map[irq], link)
                        irq_set[i++] = *e;
        rcu_read_unlock();
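        /*
         * The matching routing entries were copied out above under
         * rcu_read_lock(); run their set() callbacks outside of the
         * read-side critical section.
         */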
        while (i--) {
                int r;
                r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
                                   line_status);
                if (r < 0)
                        continue;

                ret = r + ((ret < 0) ? 0 : ret);
        }

        return ret;
}

/*
 * Deliver an IRQ in an atomic context if we can, or return a failure;
 * the caller can then retry from process context.
 * Return value:
 *  -EWOULDBLOCK - Can't deliver in atomic context: retry in process context.
 *  Other values - No need to retry.
 */
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
{
        struct kvm_kernel_irq_routing_entry *e;
        int ret = -EINVAL;
        struct kvm_irq_routing_table *irq_rt;

        trace_kvm_set_irq(irq, level, irq_source_id);

        /*
         * Injection into either PIC or IOAPIC might need to scan all CPUs,
         * which would need to be retried from thread context; when the same
         * GSI is connected to both PIC and IOAPIC, we'd have to report a
         * partial failure here.
         * Since there's no easy way to do this, we only support injecting MSI,
         * which is limited to a 1:1 GSI mapping.
         */
        rcu_read_lock();
        irq_rt = rcu_dereference(kvm->irq_routing);
        if (irq < irq_rt->nr_rt_entries)
                hlist_for_each_entry(e, &irq_rt->map[irq], link) {
                        if (likely(e->type == KVM_IRQ_ROUTING_MSI))
                                ret = kvm_set_msi_inatomic(e, kvm);
                        else
                                ret = -EWOULDBLOCK;
                        break;
                }
        rcu_read_unlock();
        return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        struct kvm_irq_ack_notifier *kian;
        int gsi;
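        /* Translate the irqchip pin to its GSI and check for a registered ack notifier. */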
        rcu_read_lock();
        gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
        if (gsi != -1)
                hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi) {
                                rcu_read_unlock();
                                return true;
                        }

        rcu_read_unlock();

        return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        struct kvm_irq_ack_notifier *kian;
        int gsi;

        trace_kvm_ack_irq(irqchip, pin);

        rcu_read_lock();
        gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
        if (gsi != -1)
                hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi)
                                kian->irq_acked(kian);
        rcu_read_unlock();
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
        mutex_unlock(&kvm->irq_lock);
        kvm_ioapic_make_eoibitmap_request(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_del_init_rcu(&kian->link);
        mutex_unlock(&kvm->irq_lock);
        synchronize_rcu();
        kvm_ioapic_make_eoibitmap_request(kvm);
}

int kvm_request_irq_source_id(struct kvm *kvm)
{
        unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
        int irq_source_id;

        mutex_lock(&kvm->irq_lock);
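        /* Claim the lowest free bit in the per-VM interrupt source bitmap. */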
        irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);

        if (irq_source_id >= BITS_PER_LONG) {
                printk(KERN_WARNING "kvm: exhausted allocatable IRQ sources!\n");
                irq_source_id = -EFAULT;
                goto unlock;
        }

        ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
#ifdef CONFIG_X86
        ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
#endif
        set_bit(irq_source_id, bitmap);
unlock:
        mutex_unlock(&kvm->irq_lock);

        return irq_source_id;
}

void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
        ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
#ifdef CONFIG_X86
        ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
#endif

        mutex_lock(&kvm->irq_lock);
        if (irq_source_id < 0 ||
            irq_source_id >= BITS_PER_LONG) {
                printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
                goto unlock;
        }
        clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
        if (!irqchip_in_kernel(kvm))
                goto unlock;
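        /* Drop any interrupts this source still has asserted in the in-kernel irqchips. */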
        kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
#ifdef CONFIG_X86
        kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id);
#endif
unlock:
        mutex_unlock(&kvm->irq_lock);
}

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn)
{
        mutex_lock(&kvm->irq_lock);
        kimn->irq = irq;
        hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
        mutex_unlock(&kvm->irq_lock);
}

void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn)
{
        mutex_lock(&kvm->irq_lock);
        hlist_del_rcu(&kimn->link);
        mutex_unlock(&kvm->irq_lock);
        synchronize_rcu();
}

void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask)
{
        struct kvm_irq_mask_notifier *kimn;
        int gsi;

        rcu_read_lock();
        gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
        if (gsi != -1)
                hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
                        if (kimn->irq == gsi)
                                kimn->func(kimn, mask);
        rcu_read_unlock();
}

void kvm_free_irq_routing(struct kvm *kvm)
{
        /*
         * Called only during vm destruction.  Nobody can use the pointer
         * at this stage.
         */
        kfree(kvm->irq_routing);
}

static int setup_routing_entry(struct kvm_irq_routing_table *rt,
                               struct kvm_kernel_irq_routing_entry *e,
                               const struct kvm_irq_routing_entry *ue)
{
        int r = -EINVAL;
        int delta;
        unsigned max_pin;
        struct kvm_kernel_irq_routing_entry *ei;

        /*
         * Do not allow GSI to be mapped to the same irqchip more than once.
         * Allow only one to one mapping between GSI and MSI.
         */
        hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
                if (ei->type == KVM_IRQ_ROUTING_MSI ||
                    ue->type == KVM_IRQ_ROUTING_MSI ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
                        return r;

        e->gsi = ue->gsi;
        e->type = ue->type;
        switch (ue->type) {
        case KVM_IRQ_ROUTING_IRQCHIP:
                delta = 0;
                switch (ue->u.irqchip.irqchip) {
                case KVM_IRQCHIP_PIC_MASTER:
                        e->set = kvm_set_pic_irq;
                        max_pin = PIC_NUM_PINS;
                        break;
                case KVM_IRQCHIP_PIC_SLAVE:
                        e->set = kvm_set_pic_irq;
                        max_pin = PIC_NUM_PINS;
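                        /* Slave pins occupy pins 8-15 of the combined PIC pin space. */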
                        delta = 8;
                        break;
                case KVM_IRQCHIP_IOAPIC:
                        max_pin = KVM_IOAPIC_NUM_PINS;
                        e->set = kvm_set_ioapic_irq;
                        break;
                default:
                        goto out;
                }
                e->irqchip.irqchip = ue->u.irqchip.irqchip;
                e->irqchip.pin = ue->u.irqchip.pin + delta;
                if (e->irqchip.pin >= max_pin)
                        goto out;
                rt->chip[ue->u.irqchip.irqchip][e->irqchip.pin] = ue->gsi;
                break;
        case KVM_IRQ_ROUTING_MSI:
                e->set = kvm_set_msi;
                e->msi.address_lo = ue->u.msi.address_lo;
                e->msi.address_hi = ue->u.msi.address_hi;
                e->msi.data = ue->u.msi.data;
                break;
        default:
                goto out;
        }

        hlist_add_head(&e->link, &rt->map[e->gsi]);
        r = 0;
out:
        return r;
}

int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *ue,
                        unsigned nr,
                        unsigned flags)
{
        struct kvm_irq_routing_table *new, *old;
        u32 i, j, nr_rt_entries = 0;
        int r;

        for (i = 0; i < nr; ++i) {
                if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES)
                        return -EINVAL;
                nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
        }

        nr_rt_entries += 1;

        new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head))
                      + (nr * sizeof(struct kvm_kernel_irq_routing_entry)),
                      GFP_KERNEL);

        if (!new)
                return -ENOMEM;
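        /*
         * The routing table, the map[] hlist heads and the routing entries
         * all live in the single allocation above; rt_entries points just
         * past the end of map[].
         */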
        new->rt_entries = (void *)&new->map[nr_rt_entries];

        new->nr_rt_entries = nr_rt_entries;
        for (i = 0; i < 3; i++)
                for (j = 0; j < KVM_IOAPIC_NUM_PINS; j++)
                        new->chip[i][j] = -1;

        for (i = 0; i < nr; ++i) {
                r = -EINVAL;
                if (ue->flags)
                        goto out;
                r = setup_routing_entry(new, &new->rt_entries[i], ue);
                if (r)
                        goto out;
                ++ue;
        }

        mutex_lock(&kvm->irq_lock);
        old = kvm->irq_routing;
        kvm_irq_routing_update(kvm, new);
        mutex_unlock(&kvm->irq_lock);

        synchronize_rcu();
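        /*
         * After synchronize_rcu() no reader can still see the old table;
         * reuse 'new' so that the common kfree() below frees it.
         */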
        new = old;
        r = 0;

out:
        kfree(new);
        return r;
}

#define IOAPIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
          .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)

#ifdef CONFIG_X86
# define PIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
          .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
# define ROUTING_ENTRY2(irq) \
        IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
#else
# define ROUTING_ENTRY2(irq) \
        IOAPIC_ROUTING_ENTRY(irq)
#endif
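/*
 * Default routing: GSIs 0-15 are wired to both the PIC and the IOAPIC
 * (ROUTING_ENTRY2), higher GSIs go to the IOAPIC only (ROUTING_ENTRY1).
 */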
static const struct kvm_irq_routing_entry default_routing[] = {
        ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
        ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
        ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
        ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
        ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
        ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
        ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
        ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
        ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
        ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
        ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
        ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
#ifdef CONFIG_IA64
        ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
        ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
        ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
        ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
        ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
        ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
        ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
        ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
        ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
        ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
        ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
        ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
#endif
};

int kvm_setup_default_irq_routing(struct kvm *kvm)
{
        return kvm_set_irq_routing(kvm, default_routing,
                                   ARRAY_SIZE(default_routing), 0);
}