drivers/xen/events.c
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
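
/*
 * Typical consumer flow, as a sketch built from the helpers exported
 * below (error handling elided; my_handler, "my-dev" and dev are
 * illustrative names, not part of this file):
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0, "my-dev", dev);
 *	...
 *	notify_remote_via_irq(irq);
 *	...
 *	unbind_from_irqhandler(irq, dev);
 */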

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

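/* One irq_info entry per Xen irq allocated by this file; see xen_irq_init(). */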
static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	struct list_head list;
	enum xen_irq_type type;	/* type */
	unsigned irq;
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};
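/*
 * Values for irq_info.u.pirq.flags: PIRQ_NEEDS_EOI mirrors the hypervisor's
 * XENIRQSTAT_needs_eoi status (see pirq_query_unmask()); PIRQ_SHAREABLE is
 * set when the caller of xen_bind_pirq_gsi_to_irq() allowed sharing and
 * selects BIND_PIRQ__WILL_SHARE at startup.
 */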
#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)

static int *evtchn_to_irq;

static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
		      cpu_evtchn_mask);

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
	return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static void xen_irq_info_common_init(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     unsigned short evtchn,
				     unsigned short cpu)
{

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;

	evtchn_to_irq[evtchn] = irq;
}

static void xen_irq_info_evtchn_init(unsigned irq,
				     unsigned short evtchn)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static void xen_irq_info_ipi_init(unsigned cpu,
				  unsigned irq,
				  unsigned short evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}

static void xen_irq_info_virq_init(unsigned cpu,
				   unsigned irq,
				   unsigned short evtchn,
				   unsigned short virq)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;
}

static void xen_irq_info_pirq_init(unsigned irq,
				   unsigned short evtchn,
				   unsigned short pirq,
				   unsigned short gsi,
				   unsigned short vector,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.vector = vector;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;
}

/*
 * Accessors for packed IRQ information.
 */
static unsigned int evtchn_from_irq(unsigned irq)
{
	if (unlikely(WARN(irq < 0 || irq >= nr_irqs, "Invalid irq %d!\n", irq)))
		return 0;

	return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

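/*
 * An event is deliverable here only if it is pending in the shared page,
 * bound to this cpu, and not globally masked.
 */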
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));

	info_for_irq(irq)->cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;
#ifdef CONFIG_SMP
	struct irq_info *info;

	/* By default all event channels notify CPU#0. */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		struct irq_desc *desc = irq_to_desc(info->irq);
		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
	}
#endif

	for_each_possible_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i),
		       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}


/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;
#ifdef CONFIG_SMP
	struct irq_desc *desc = irq_to_desc(irq);

	/* By default all event channels notify CPU#0. */
	cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;

	irq_set_handler_data(irq, info);

	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irq_dynamic(void)
{
	int first = 0;
	int irq;

#ifdef CONFIG_X86_IO_APIC
	/*
	 * For an HVM guest or domain 0 which see "real" (emulated or
	 * actual respectively) GSIs we allocate dynamic IRQs
	 * e.g. those corresponding to event channels or MSIs
	 * etc. from the range above those "real" GSIs to avoid
	 * collisions.
	 */
	if (xen_initial_domain() || xen_hvm_domain())
		first = get_nr_irqs_gsi();
#endif

	irq = irq_alloc_desc_from(first, -1);

	xen_irq_init(irq);

	return irq;
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < NR_IRQS_LEGACY)
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = irq_get_handler_data(irq);

	list_del(&info->list);

	irq_set_handler_data(irq, NULL);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < NR_IRQS_LEGACY)
		return;

	irq_free_desc(irq);
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

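/* Heuristic for IRQ autoprobe: a descriptor with no action installed is
 * assumed to be mid-probe, so a failed pirq bind is expected and not
 * worth logging (see __startup_pirq() below). */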
static bool probing_irq(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->action == NULL;
}

static void eoi_pirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}

static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		if (!probing_irq(irq))
			printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
			       irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	info->evtchn = evtchn;

out:
	unmask_evtchn(evtchn);
	eoi_pirq(irq_get_irq_data(irq));

	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	struct evtchn_close close;
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	int evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	close.port = evtchn;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
	startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}

static int find_irq_by_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq is actually started
 * up. Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered; not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq = -1;
	struct physdev_irq irq_op;

	spin_lock(&irq_mapping_update_lock);

	irq = find_irq_by_gsi(gsi);
	if (irq != -1) {
		printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
		       irq, gsi);
		goto out;	/* XXX need refcount? */
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
			       shareable ? PIRQ_SHAREABLE : 0);

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int vector, const char *name,
			     domid_t domid)
{
	int irq, ret;

	spin_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irq_dynamic();
	if (irq == -1)
		goto out;

	irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq,
				      name);

	xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	spin_unlock(&irq_mapping_update_lock);
	xen_free_irq(irq);
	return -1;
}
#endif

int xen_destroy_irq(int irq)
{
	struct irq_desc *desc;
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	spin_lock(&irq_mapping_update_lock);

	desc = irq_to_desc(irq);
	if (!desc)
		goto out;

	if (xen_initial_domain()) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			printk(KERN_INFO "domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			printk(KERN_WARNING "unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	spin_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq;

	struct irq_info *info;

	spin_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info == NULL || info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}


int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq == -1)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_edge_irq, "event");

		xen_irq_info_evtchn_init(irq, evtchn);
	}

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

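	/* Propagate a hypercall error; otherwise hand the freshly
	 * allocated local port to bind_evtchn_to_irq(). */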
	return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}


int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq == -1)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		xen_irq_info_virq_init(cpu, irq, evtchn, virq);

		bind_evtchn_to_cpu(evtchn, cpu);
	}

out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

	BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

	xen_free_irq(irq);

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_evtchn_to_irq(evtchn);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
					  unsigned int remote_port,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname,
					  void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n  ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*lx\n  ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n   " : " ");
	printk("\nglobal mask:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nglobally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocal cpu%d mask:\n   ", cpu);
	for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
		printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n   " : " ");

	printk("\nlocally unmasked:\n   ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		unsigned long pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n   " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			int word_idx = i / BITS_PER_LONG;
			printk("  %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i],
			       sync_test_bit(word_idx, &v->evtchn_pending_sel)
			       ? "" : " l2-clear",
			       !sync_test_bit(i, sh->evtchn_mask)
			       ? "" : " globally-masked",
			       sync_test_bit(i, cpu_evtchn)
			       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
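/*
 * Per-cpu scan position: each upcall resumes the search just past the
 * last port it processed, so low-numbered ports cannot starve the rest.
 */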
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~0UL) << i))
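/* For instance, MASK_LSBS(0x0f, 2) == 0x0c: bits 0 and 1 are cleared. */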

/*
 * Search the CPUs pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
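/*
 * As a concrete example (assuming a 64-bit build, so BITS_PER_LONG == 64),
 * port 130 is encoded as word_idx 2, bit_idx 2; the handler below recovers
 * it as port = word_idx * BITS_PER_LONG + bit_idx = 2 * 64 + 2.
 */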
static void __xen_evtchn_do_upcall(void)
{
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i;
	int cpu = get_cpu();
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	unsigned count;

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__this_cpu_inc_return(xed_nesting_count) - 1)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);

		start_word_idx = __this_cpu_read(current_word_idx);
		start_bit_idx = __this_cpu_read(current_bit_idx);

		word_idx = start_word_idx;

		for (i = 0; pending_words != 0; i++) {
			unsigned long pending_bits;
			unsigned long words;

			words = MASK_LSBS(pending_words, word_idx);

			/*
			 * If we masked out all events, wrap to beginning.
			 */
			if (words == 0) {
				word_idx = 0;
				bit_idx = 0;
				continue;
			}
			word_idx = __ffs(words);

			pending_bits = active_evtchns(cpu, s, word_idx);
			bit_idx = 0; /* usually scan entire word from start */
			if (word_idx == start_word_idx) {
				/* We scan the starting word in two parts */
				if (i == 0)
					/* 1st time: start in the middle */
					bit_idx = start_bit_idx;
				else
					/* 2nd time: mask bits done already */
					bit_idx &= (1UL << start_bit_idx) - 1;
			}

			do {
				unsigned long bits;
				int port, irq;
				struct irq_desc *desc;

				bits = MASK_LSBS(pending_bits, bit_idx);

				/* If we masked out all events, move on. */
				if (bits == 0)
					break;

				bit_idx = __ffs(bits);

				/* Process port. */
				port = (word_idx * BITS_PER_LONG) + bit_idx;
				irq = evtchn_to_irq[port];

				if (irq != -1) {
					desc = irq_to_desc(irq);
					if (desc)
						generic_handle_irq_desc(irq, desc);
				}

				bit_idx = (bit_idx + 1) % BITS_PER_LONG;

				/* Next caller starts at last processed + 1 */
				__this_cpu_write(current_word_idx,
						 bit_idx ? word_idx :
						 (word_idx+1) % BITS_PER_LONG);
				__this_cpu_write(current_bit_idx, bit_idx);
			} while (bit_idx != 0);

			/* Scan start_l1i twice; all others once. */
			if ((word_idx != start_word_idx) || (i != 0))
				pending_words &= ~(1UL << word_idx);

			word_idx = (word_idx + 1) % BITS_PER_LONG;
		}

		BUG_ON(!irqs_disabled());

		count = __this_cpu_read(xed_nesting_count);
		__this_cpu_write(xed_nesting_count, 0);
	} while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

	put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	exit_idle();
	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	xen_irq_info_evtchn_init(irq, evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* new event channels are always bound to cpu 0 */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/*
	 * Events delivered via platform PCI interrupts are always
	 * routed to vcpu 0 and hence cannot be rebound.
	 */
	if (xen_hvm_domain() && !xen_have_vector_callback)
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(data->irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);

	irq_move_irq(data);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static int retrigger_dynirq(struct irq_data *data)
{
	int evtchn = evtchn_from_irq(data->irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		 * only devices present are GSI based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
					gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		xen_irq_info_virq_init(cpu, irq, evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}

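/*
 * A plausible polling pattern built from the helpers above (a sketch,
 * not a literal excerpt from any caller; "condition" stands for whatever
 * state the caller is waiting on):
 *
 *	xen_clear_irq_pending(irq);
 *	if (!condition)
 *		xen_poll_irq(irq);
 */
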
/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu, evtchn;
	struct irq_info *info;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list)
		info->evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};

int xen_set_callback_via(uint64_t via)
{
	struct xen_hvm_param a;
	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;
	a.value = via;
	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
	int rc;
	uint64_t callback_via;
	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
		rc = xen_set_callback_via(callback_via);
		if (rc) {
			printk(KERN_ERR "Request for Xen HVM callback vector"
					" failed.\n");
			xen_have_vector_callback = 0;
			return;
		}
		printk(KERN_INFO "Xen HVM callback vector for event delivery is "
				"enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
			alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
	}
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
	int i;

	evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
				GFP_KERNEL);
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		evtchn_to_irq[i] = -1;

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	if (xen_hvm_domain()) {
		xen_callback_vector();
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		irq_ctx_init(smp_processor_id());
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
}