/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. Hardware interrupts.  Not supported at present.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>

#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI"
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info
{
	enum xen_irq_type type;	/* type */
	unsigned short evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short gsi;
			unsigned short vector;
		} pirq;
	} u;
};

static struct irq_info irq_info[NR_IRQS];

static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1
};

/* Per-cpu bitmap of which event channels are bound to each cpu. */
struct cpu_evtchn_s {
	unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
};
static struct cpu_evtchn_s *cpu_evtchn_mask_p;
static inline unsigned long *cpu_evtchn_mask(int cpu)
{
	return cpu_evtchn_mask_p[cpu].bits;
}

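/*
 * Illustration of the mapping invariants (made-up numbers): after event
 * channel 7 has been bound to irq 42 on cpu 0, we expect
 *
 *	evtchn_to_irq[7] == 42
 *	irq_info[42].evtchn == 7
 *	irq_info[42].cpu == 0
 *	test_bit(7, cpu_evtchn_mask(0)) != 0
 */
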
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_chip xen_dynamic_chip;

/* Constructors for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
{
	return (struct irq_info) { .type = IRQT_UNBOUND };
}

static struct irq_info mk_evtchn_info(unsigned short evtchn)
{
	return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
			.cpu = 0 };
}

static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
{
	return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
			.cpu = 0, .u.ipi = ipi };
}

static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
{
	return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
			.cpu = 0, .u.virq = virq };
}

static struct irq_info mk_pirq_info(unsigned short evtchn,
				    unsigned short gsi, unsigned short vector)
{
	return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
			.cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
}

/*
 * Accessors for packed IRQ information.
 */
static struct irq_info *info_for_irq(unsigned irq)
{
	return &irq_info[irq];
}

static unsigned int evtchn_from_irq(unsigned irq)
{
	return info_for_irq(irq)->evtchn;
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned gsi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.gsi;
}

static unsigned vector_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.vector;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	int irq = evtchn_to_irq[evtchn];
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}

/*
 * An event channel is "active" on a cpu if it is pending, not globally
 * masked, and bound to that cpu.
 */
static inline unsigned long active_evtchns(unsigned int cpu,
					   struct shared_info *sh,
					   unsigned int idx)
{
	return (sh->evtchn_pending[idx] &
		cpu_evtchn_mask(cpu)[idx] &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	int irq = evtchn_to_irq[chn];

	BUG_ON(irq == -1);
#ifdef CONFIG_SMP
	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
#endif

	__clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
	__set_bit(chn, cpu_evtchn_mask(cpu));

	irq_info[irq].cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
#ifdef CONFIG_SMP
	struct irq_desc *desc;
	int i;

	/* By default all event channels notify CPU#0. */
	for_each_irq_desc(i, desc) {
		cpumask_copy(desc->affinity, cpumask_of(0));
	}
#endif

	/* Set every bit of CPU#0's mask, not just sizeof(pointer) bytes. */
	memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s));
}

static inline void clear_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

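/*
 * Typical use (a sketch, not code from this file): a ring producer
 * queues a request and then kicks the remote end,
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(info->irq);
 *
 * where 'info' stands in for the driver's own state and the ring macro
 * comes from xen/interface/io/ring.h.
 */
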
static void mask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, &s->evtchn_mask[0]);
}

static void unmask_evtchn(int port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);

		sync_clear_bit(port, &s->evtchn_mask[0]);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (sync_test_bit(port, &s->evtchn_pending[0]) &&
		    !sync_test_and_set_bit(port / BITS_PER_LONG,
					   &vcpu_info->evtchn_pending_sel))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

static int find_unbound_irq(void)
{
	int irq;
	struct irq_desc *desc;

	for (irq = 0; irq < nr_irqs; irq++)
		if (irq_info[irq].type == IRQT_UNBOUND)
			break;

	if (irq == nr_irqs)
		panic("No available IRQ to bind to: increase nr_irqs!\n");

	desc = irq_to_desc_alloc_node(irq, 0);
	if (WARN_ON(desc == NULL))
		return -1;

	dynamic_irq_init(irq);

	return irq;
}

int bind_evtchn_to_irq(unsigned int evtchn)
{
	int irq;

	spin_lock(&irq_mapping_update_lock);

	irq = evtchn_to_irq[evtchn];

	if (irq == -1) {
		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "event");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_evtchn_info(evtchn);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = find_unbound_irq();
		if (irq < 0)
			goto out;

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "ipi");

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

 out:
	spin_unlock(&irq_mapping_update_lock);
	return irq;
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		irq = find_unbound_irq();

		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
					      handle_level_irq, "virq");

		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	struct evtchn_close close;
	int evtchn = evtchn_from_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
			BUG();

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[ipi_from_irq(irq)] = -1;
			break;
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		irq_info[irq] = mk_unbound_info();

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_evtchn_to_irq(evtchn);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);

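/*
 * Typical lifecycle (a sketch, not code from this file): a frontend
 * that has learned an event channel port, e.g. via xenbus, would do
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"mydev", mydev);
 *	if (irq < 0)
 *		goto fail;
 *	...
 *	unbind_from_irqhandler(irq, mydev);
 *
 * 'my_handler' (an irq_handler_t) and 'mydev' are placeholders for the
 * caller's own handler and context pointer.
 */
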
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	unsigned int irq;
	int retval;

	irq = bind_virq_to_irq(virq, cpu);
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

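/*
 * For example (a sketch; the exact flags vary by caller): the per-cpu
 * debug VIRQ is hooked up to xen_debug_interrupt() below with
 *
 *	bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
 *				IRQF_DISABLED | IRQF_PERCPU,
 *				"debug", NULL);
 *
 * once for each cpu being brought up, since VIRQs are per-cpu.
 */
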
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);

	spin_lock_irqsave(&debug_lock, flags);

	printk("vcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		struct vcpu_info *v = per_cpu(xen_vcpu, i);
		printk("%d: masked=%d pending=%d event_sel %08lx\n ", i,
		       (get_irq_regs() && i == cpu) ? xen_irqs_disabled(get_irq_regs()) : v->evtchn_upcall_mask,
		       v->evtchn_upcall_pending,
		       v->evtchn_pending_sel);
	}
	printk("pending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nmasks:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nunmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\npending list:\n");
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (sync_test_bit(i, sh->evtchn_pending)) {
			printk("  %d: event %d -> irq %d\n",
			       cpu_from_evtchn(i), i,
			       evtchn_to_irq[i]);
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

/*
 * Search the CPU's pending events bitmasks.  For each one found, map
 * the event number to an irq, and feed it into handle_irq() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	int cpu = get_cpu();
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
	static DEFINE_PER_CPU(unsigned, nesting_count);
	unsigned count;

	exit_idle();
	irq_enter();

	do {
		unsigned long pending_words;

		vcpu_info->evtchn_upcall_pending = 0;

		if (__get_cpu_var(nesting_count)++)
			goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif
		pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);
		while (pending_words != 0) {
			unsigned long pending_bits;
			int word_idx = __ffs(pending_words);
			pending_words &= ~(1UL << word_idx);

			while ((pending_bits = active_evtchns(cpu, s, word_idx)) != 0) {
				int bit_idx = __ffs(pending_bits);
				int port = (word_idx * BITS_PER_LONG) + bit_idx;
				int irq = evtchn_to_irq[port];

				if (irq != -1)
					handle_irq(irq, regs);
			}
		}

		BUG_ON(!irqs_disabled());

		count = __get_cpu_var(nesting_count);
		__get_cpu_var(nesting_count) = 0;
	} while (count != 1);

out:
	irq_exit();
	set_irq_regs(old_regs);

	put_cpu();
}

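/*
 * Worked example of the two-level search above (64-bit build,
 * illustrative numbers): if evtchn_pending_sel had bit 1 set, word_idx
 * is 1; if active_evtchns() for word 1 then has bit 3 set, bit_idx is 3
 * and the pending port is 1 * 64 + 3 = 67, which is looked up in
 * evtchn_to_irq[] and dispatched via handle_irq().
 */
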
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	spin_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(evtchn_to_irq[evtchn] != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	evtchn_to_irq[evtchn] = irq;
	irq_info[irq] = mk_evtchn_info(evtchn);

	spin_unlock(&irq_mapping_update_lock);

	/* New event channels are always bound to cpu 0. */
	irq_set_affinity(irq, cpumask_of(0));

	/* Unmask the event channel. */
	enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	int evtchn = evtchn_from_irq(irq);

	if (!VALID_EVTCHN(evtchn))
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = tcpu;

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu);

	return 0;
}

static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
{
	unsigned tcpu = cpumask_first(dest);

	return rebind_irq_to_cpu(irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
	int masked, evtchn = evtchn_from_irq(irq);
	struct shared_info *s = HYPERVISOR_shared_info;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	/*
	 * Set the pending bit while the channel is masked; unmask_evtchn()
	 * then regenerates the upcall if one is needed.
	 */
	masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
	sync_set_bit(evtchn, s->evtchn_pending);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

static void enable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void disable_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static void ack_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);

	move_native_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

static int retrigger_dynirq(unsigned int irq)
{
	int evtchn = evtchn_from_irq(irq);
	struct shared_info *sh = HYPERVISOR_shared_info;
	int ret = 0;

	if (VALID_EVTCHN(evtchn)) {
		int masked;

		masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
		sync_set_bit(evtchn, sh->evtchn_pending);
		if (!masked)
			unmask_evtchn(evtchn);
		ret = 1;
	}

	return ret;
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_virq_info(evtchn, virq);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	int ipi, irq, evtchn;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
		irq_info[irq] = mk_ipi_info(evtchn, ipi);
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		clear_evtchn(evtchn);
}

void xen_set_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	int evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending.  In the usual case, the
   irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = 0;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}

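/*
 * Sketch of the intended polling pattern (illustrative; assumes the irq
 * has been disabled so delivery won't race with the poll):
 *
 *	xen_clear_irq_pending(irq);
 *	while (!condition_we_are_waiting_for())
 *		xen_poll_irq(irq);	-- blocks until an event arrives
 *
 * This is how a paravirtualized spinlock can sleep on its event channel
 * instead of spinning; 'condition_we_are_waiting_for' is a placeholder.
 */
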
void xen_irq_resume(void)
{
	unsigned int cpu, irq, evtchn;

	init_evtchn_cpu_bindings();

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++)
		irq_info[irq].evtchn = 0; /* zap event-channel binding */

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name		= "xen-dyn",

	.disable	= disable_dynirq,
	.mask		= disable_dynirq,
	.unmask		= enable_dynirq,

	.ack		= ack_dynirq,
	.set_affinity	= set_affinity_irq,
	.retrigger	= retrigger_dynirq,
};

void __init xen_init_IRQ(void)
{
	int i;
	size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);

	cpu_evtchn_mask_p = alloc_bootmem(size);
	BUG_ON(cpu_evtchn_mask_p == NULL);

	init_evtchn_cpu_bindings();

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

	irq_ctx_init(smp_processor_id());
}