Commit | Line | Data |
---|---|---|
74afab7a JL |
1 | /* |
2 | * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc. | |
3 | * | |
4 | * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo | |
5 | * Moved from arch/x86/kernel/apic/io_apic.c. | |
b5dc8e6c JL |
6 | * Jiang Liu <jiang.liu@linux.intel.com> |
7 | * Enable support of hierarchical irqdomains | |
74afab7a JL |
8 | * |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | */ | |
13 | #include <linux/interrupt.h> | |
14 | #include <linux/init.h> | |
15 | #include <linux/compiler.h> | |
16 | #include <linux/irqdomain.h> | |
17 | #include <linux/slab.h> | |
18 | #include <asm/hw_irq.h> | |
19 | #include <asm/apic.h> | |
20 | #include <asm/i8259.h> | |
21 | #include <asm/desc.h> | |
22 | #include <asm/irq_remapping.h> | |
23 | ||
/*
 * Per-interrupt state for the vector allocator: the generic irq_cfg plus
 * the cpumasks tracking which CPUs currently hold the vector (domain) and
 * which still hold the previous one during a migration (old_domain).
 */
struct apic_chip_data {
	struct irq_cfg cfg;
	cpumask_var_t domain;		/* CPUs where cfg.vector is programmed */
	cpumask_var_t old_domain;	/* CPUs pending cleanup after a move */
	u8 move_in_progress : 1;	/* set while old_domain needs cleanup */
};
30 | ||
/* Root irqdomain that all x86 interrupt chips are parented to. */
struct irq_domain *x86_vector_domain;
/* Serializes vector allocation/free against CPU hotplug. */
static DEFINE_RAW_SPINLOCK(vector_lock);
static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
/* Pre-allocated chip data for the legacy (ISA) irqs 0..15. */
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
#endif
74afab7a JL |
37 | |
void lock_vector_lock(void)
{
	/* Used so the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}
45 | ||
/* Counterpart of lock_vector_lock(). */
void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
50 | ||
7f3262ed | 51 | static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data) |
74afab7a | 52 | { |
b5dc8e6c JL |
53 | if (!irq_data) |
54 | return NULL; | |
55 | ||
56 | while (irq_data->parent_data) | |
57 | irq_data = irq_data->parent_data; | |
58 | ||
74afab7a JL |
59 | return irq_data->chip_data; |
60 | } | |
61 | ||
7f3262ed JL |
62 | struct irq_cfg *irqd_cfg(struct irq_data *irq_data) |
63 | { | |
64 | struct apic_chip_data *data = apic_chip_data(irq_data); | |
65 | ||
66 | return data ? &data->cfg : NULL; | |
67 | } | |
68 | ||
/* Convenience wrapper: look up irq_cfg by irq number. */
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}
74afab7a | 73 | |
7f3262ed JL |
74 | static struct apic_chip_data *alloc_apic_chip_data(int node) |
75 | { | |
76 | struct apic_chip_data *data; | |
77 | ||
78 | data = kzalloc_node(sizeof(*data), GFP_KERNEL, node); | |
79 | if (!data) | |
74afab7a | 80 | return NULL; |
7f3262ed JL |
81 | if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node)) |
82 | goto out_data; | |
83 | if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node)) | |
74afab7a | 84 | goto out_domain; |
7f3262ed | 85 | return data; |
74afab7a | 86 | out_domain: |
7f3262ed JL |
87 | free_cpumask_var(data->domain); |
88 | out_data: | |
89 | kfree(data); | |
74afab7a JL |
90 | return NULL; |
91 | } | |
92 | ||
7f3262ed | 93 | static void free_apic_chip_data(struct apic_chip_data *data) |
74afab7a | 94 | { |
7f3262ed JL |
95 | if (data) { |
96 | free_cpumask_var(data->domain); | |
97 | free_cpumask_var(data->old_domain); | |
98 | kfree(data); | |
b5dc8e6c | 99 | } |
74afab7a JL |
100 | } |
101 | ||
7f3262ed JL |
/*
 * Allocate a vector for @irq constrained to @mask, recording the result
 * in @d->cfg / @d->domain.  Caller must hold vector_lock.
 *
 * Returns 0 on success, -EBUSY if a previous migration is still pending,
 * -ENOMEM on cpumask allocation failure, -ENOSPC if no vector is free.
 */
static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			       const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (d->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	cpumask_clear(d->old_domain);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask, mask);

		if (cpumask_subset(tmp_mask, d->domain)) {
			err = 0;
			/* Existing assignment already covers the request. */
			if (cpumask_equal(tmp_mask, d->domain))
				break;
			/*
			 * New cpumask using the vector is a proper subset of
			 * the current in use mask. So cleanup the vector
			 * allocation for the members that are not used anymore.
			 */
			cpumask_andnot(d->old_domain, d->domain, tmp_mask);
			d->move_in_progress =
			    cpumask_intersects(d->old_domain, cpu_online_mask);
			cpumask_and(d->domain, d->domain, tmp_mask);
			break;
		}

		vector = current_vector;
		offset = current_offset;
next:
		/* Step by 16 so consecutive irqs land in different levels. */
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		/* Wrapped around: this allocation domain is exhausted. */
		if (unlikely(current_vector == vector)) {
			cpumask_or(d->old_domain, d->old_domain, tmp_mask);
			cpumask_andnot(tmp_mask, mask, d->old_domain);
			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
			continue;
		}

		if (test_bit(vector, used_vectors))
			goto next;

		/* Vector must be unused on every online CPU in the domain. */
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
			if (per_cpu(vector_irq, new_cpu)[vector] >
			    VECTOR_UNDEFINED)
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		/* Replacing an existing vector: schedule cleanup of old one. */
		if (d->cfg.vector) {
			cpumask_copy(d->old_domain, d->domain);
			d->move_in_progress =
			    cpumask_intersects(d->old_domain, cpu_online_mask);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		d->cfg.vector = vector;
		cpumask_copy(d->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);

	if (!err) {
		/* cache destination APIC IDs into cfg->dest_apicid */
		err = apic->cpu_mask_to_apicid_and(mask, d->domain,
						   &d->cfg.dest_apicid);
	}

	return err;
}
201 | ||
7f3262ed | 202 | static int assign_irq_vector(int irq, struct apic_chip_data *data, |
f970510c | 203 | const struct cpumask *mask) |
74afab7a JL |
204 | { |
205 | int err; | |
206 | unsigned long flags; | |
207 | ||
208 | raw_spin_lock_irqsave(&vector_lock, flags); | |
7f3262ed | 209 | err = __assign_irq_vector(irq, data, mask); |
74afab7a JL |
210 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
211 | return err; | |
212 | } | |
213 | ||
/*
 * Release the vector owned by @irq: clear the per-cpu vector_irq slots in
 * the current domain and, if a migration was pending, scrub the stale
 * entries in the old domain as well.
 */
static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
	int cpu, vector;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	BUG_ON(!data->cfg.vector);

	vector = data->cfg.vector;
	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;

	data->cfg.vector = 0;
	cpumask_clear(data->domain);

	/* No migration pending: nothing more to clean up. */
	if (likely(!data->move_in_progress)) {
		raw_spin_unlock_irqrestore(&vector_lock, flags);
		return;
	}

	/* Find and clear the old vector entry on each old-domain CPU. */
	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
			break;
		}
	}
	data->move_in_progress = 0;
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}
246 | ||
b5dc8e6c JL |
/* Zero @info and set only the requested affinity @mask (may be NULL). */
void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}
253 | ||
254 | void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src) | |
255 | { | |
256 | if (src) | |
257 | *dst = *src; | |
258 | else | |
259 | memset(dst, 0, sizeof(*dst)); | |
260 | } | |
261 | ||
262 | static inline const struct cpumask * | |
263 | irq_alloc_info_get_mask(struct irq_alloc_info *info) | |
264 | { | |
265 | return (!info || !info->mask) ? apic->target_cpus() : info->mask; | |
266 | } | |
267 | ||
/*
 * irqdomain .free callback: release vectors and chip data for @nr_irqs
 * interrupts starting at @virq.
 */
static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			clear_irq_vector(virq + i, irq_data->chip_data);
			free_apic_chip_data(irq_data->chip_data);
#ifdef CONFIG_X86_IO_APIC
			/* Drop the cached pointer for legacy (ISA) irqs. */
			if (virq + i < nr_legacy_irqs())
				legacy_irq_data[virq + i] = NULL;
#endif
			irq_domain_reset_irq_data(irq_data);
		}
	}
}
287 | ||
288 | static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, | |
289 | unsigned int nr_irqs, void *arg) | |
290 | { | |
291 | struct irq_alloc_info *info = arg; | |
7f3262ed | 292 | struct apic_chip_data *data; |
b5dc8e6c JL |
293 | const struct cpumask *mask; |
294 | struct irq_data *irq_data; | |
b5dc8e6c JL |
295 | int i, err; |
296 | ||
297 | if (disable_apic) | |
298 | return -ENXIO; | |
299 | ||
300 | /* Currently vector allocator can't guarantee contiguous allocations */ | |
301 | if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1) | |
302 | return -ENOSYS; | |
303 | ||
304 | mask = irq_alloc_info_get_mask(info); | |
305 | for (i = 0; i < nr_irqs; i++) { | |
306 | irq_data = irq_domain_get_irq_data(domain, virq + i); | |
307 | BUG_ON(!irq_data); | |
13315320 | 308 | #ifdef CONFIG_X86_IO_APIC |
7f3262ed JL |
309 | if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i]) |
310 | data = legacy_irq_data[virq + i]; | |
13315320 JL |
311 | else |
312 | #endif | |
7f3262ed JL |
313 | data = alloc_apic_chip_data(irq_data->node); |
314 | if (!data) { | |
b5dc8e6c JL |
315 | err = -ENOMEM; |
316 | goto error; | |
317 | } | |
318 | ||
319 | irq_data->chip = &lapic_controller; | |
7f3262ed | 320 | irq_data->chip_data = data; |
b5dc8e6c | 321 | irq_data->hwirq = virq + i; |
7f3262ed | 322 | err = assign_irq_vector(virq, data, mask); |
b5dc8e6c JL |
323 | if (err) |
324 | goto error; | |
325 | } | |
326 | ||
327 | return 0; | |
328 | ||
329 | error: | |
330 | x86_vector_free_irqs(domain, virq, i + 1); | |
331 | return err; | |
332 | } | |
333 | ||
/* irqdomain callbacks for the x86 vector (root) domain. */
static struct irq_domain_ops x86_vector_domain_ops = {
	.alloc = x86_vector_alloc_irqs,
	.free = x86_vector_free_irqs,
};
338 | ||
11d686e9 JL |
/*
 * Clamp nr_irqs to a platform-derived estimate (GSIs, legacy irqs, plus
 * headroom for dynamic MSI/HT irqs) and return the number of legacy irqs
 * that the core should preallocate descriptors for.
 */
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * for MSI and HT dyn irq
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	return nr_legacy_irqs();
}
361 | ||
13315320 JL |
#ifdef CONFIG_X86_IO_APIC
/*
 * For legacy IRQ's, start with assigning irq0 to irq15 to
 * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's.
 */
static void init_legacy_irqs(void)
{
	int i, node = cpu_to_node(0);
	struct apic_chip_data *data;

	for (i = 0; i < nr_legacy_irqs(); i++) {
		data = legacy_irq_data[i] = alloc_apic_chip_data(node);
		BUG_ON(!data);
		data->cfg.vector = IRQ0_VECTOR + i;
		cpumask_setall(data->domain);
		irq_set_chip_data(i, data);
	}
}
#else
static void init_legacy_irqs(void) { }
#endif
387 | ||
11d686e9 JL |
/*
 * Early init: set up legacy irq data, create the root vector irqdomain,
 * and hook up the MSI and HT-irq sub-domains.
 */
int __init arch_early_irq_init(void)
{
	init_legacy_irqs();

	x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
						NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	return arch_early_ioapic_init();
}
402 | ||
74afab7a JL |
403 | static void __setup_vector_irq(int cpu) |
404 | { | |
405 | /* Initialize vector_irq on a new cpu */ | |
406 | int irq, vector; | |
7f3262ed | 407 | struct apic_chip_data *data; |
74afab7a JL |
408 | |
409 | /* | |
410 | * vector_lock will make sure that we don't run into irq vector | |
411 | * assignments that might be happening on another cpu in parallel, | |
412 | * while we setup our initial vector to irq mappings. | |
413 | */ | |
414 | raw_spin_lock(&vector_lock); | |
415 | /* Mark the inuse vectors */ | |
416 | for_each_active_irq(irq) { | |
7f3262ed JL |
417 | data = apic_chip_data(irq_get_irq_data(irq)); |
418 | if (!data) | |
74afab7a JL |
419 | continue; |
420 | ||
7f3262ed | 421 | if (!cpumask_test_cpu(cpu, data->domain)) |
74afab7a | 422 | continue; |
7f3262ed | 423 | vector = data->cfg.vector; |
74afab7a JL |
424 | per_cpu(vector_irq, cpu)[vector] = irq; |
425 | } | |
426 | /* Mark the free vectors */ | |
427 | for (vector = 0; vector < NR_VECTORS; ++vector) { | |
428 | irq = per_cpu(vector_irq, cpu)[vector]; | |
429 | if (irq <= VECTOR_UNDEFINED) | |
430 | continue; | |
431 | ||
7f3262ed JL |
432 | data = apic_chip_data(irq_get_irq_data(irq)); |
433 | if (!cpumask_test_cpu(cpu, data->domain)) | |
74afab7a JL |
434 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; |
435 | } | |
436 | raw_spin_unlock(&vector_lock); | |
437 | } | |
438 | ||
/*
 * Setup the vector to irq mappings.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	/*
	 * On most of the platforms, legacy PIC delivers the interrupts on the
	 * boot cpu. But there are certain platforms where PIC interrupts are
	 * delivered to multiple cpu's. If the legacy IRQ is handled by the
	 * legacy PIC, for the new cpu that is coming online, setup the static
	 * legacy vector to irq mapping:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;

	__setup_vector_irq(cpu);
}
458 | ||
/*
 * Re-trigger an irq by sending its vector as an IPI to the first online
 * CPU in the irq's domain.  Always reports success (returns 1).
 */
static int apic_retrigger_irq(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(data->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
472 | ||
/*
 * Ack an edge-triggered irq: finish any pending vector move, perform a
 * delayed irq migration if requested, then EOI the local APIC.
 */
void apic_ack_edge(struct irq_data *data)
{
	irq_complete_move(irqd_cfg(data));
	irq_move_irq(data);
	ack_APIC_irq();
}
479 | ||
/*
 * irq_chip .irq_set_affinity callback: move the irq's vector to a CPU in
 * @dest.  On failure, tries to re-assign the previous affinity so the irq
 * is not left without a valid vector.
 */
static int apic_set_affinity(struct irq_data *irq_data,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *data = irq_data->chip_data;
	int err, irq = irq_data->irq;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, data, dest);
	if (err) {
		struct irq_data *top = irq_get_irq_data(irq);

		/* Best effort: restore the old affinity before bailing out. */
		if (assign_irq_vector(irq, data, top->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	return IRQ_SET_MASK_OK;
}
503 | ||
/* irq_chip for interrupts handled directly by the local APIC. */
static struct irq_chip lapic_controller = {
	.irq_ack = apic_ack_edge,
	.irq_set_affinity = apic_set_affinity,
	.irq_retrigger = apic_retrigger_irq,
};
509 | ||
#ifdef CONFIG_SMP
/*
 * IPI every online CPU in the old domain with IRQ_MOVE_CLEANUP_VECTOR so
 * it releases its stale vector_irq entry.  Falls back to one IPI per CPU
 * if a temporary cpumask cannot be allocated.
 */
static void __send_cleanup_vector(struct apic_chip_data *data)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;

		for_each_cpu_and(i, data->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i),
					    IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	data->move_in_progress = 0;
}
528 | ||
c6c2002b JL |
529 | void send_cleanup_vector(struct irq_cfg *cfg) |
530 | { | |
7f3262ed JL |
531 | struct apic_chip_data *data; |
532 | ||
533 | data = container_of(cfg, struct apic_chip_data, cfg); | |
534 | if (data->move_in_progress) | |
535 | __send_cleanup_vector(data); | |
c6c2002b JL |
536 | } |
537 | ||
/*
 * Handler for IRQ_MOVE_CLEANUP_VECTOR: scan this CPU's vector_irq table
 * and retire vectors whose irq has moved elsewhere.  A vector still
 * pending in the IRR is deferred by re-sending the cleanup IPI to self.
 */
asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	irq_enter();
	exit_idle();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct apic_chip_data *data;

		irq = __this_cpu_read(vector_irq[vector]);

		if (irq <= VECTOR_UNDEFINED)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		data = apic_chip_data(&desc->irq_data);
		if (!data)
			continue;

		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (data->move_in_progress)
			goto unlock;

		/* This vector is still the irq's live assignment here. */
		if (vector == data->cfg.vector &&
		    cpumask_test_cpu(me, data->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleanedup is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Lets clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}
598 | ||
599 | static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) | |
600 | { | |
601 | unsigned me; | |
7f3262ed | 602 | struct apic_chip_data *data; |
74afab7a | 603 | |
7f3262ed JL |
604 | data = container_of(cfg, struct apic_chip_data, cfg); |
605 | if (likely(!data->move_in_progress)) | |
74afab7a JL |
606 | return; |
607 | ||
608 | me = smp_processor_id(); | |
7f3262ed JL |
609 | if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain)) |
610 | __send_cleanup_vector(data); | |
74afab7a JL |
611 | } |
612 | ||
/* Complete a pending move using the vector from the interrupted frame. */
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
617 | ||
/* Unconditionally complete a pending move for @irq using its own vector. */
void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	if (cfg)
		__irq_complete_move(cfg, cfg->vector);
}
#endif
626 | ||
74afab7a JL |
627 | static void __init print_APIC_field(int base) |
628 | { | |
629 | int i; | |
630 | ||
631 | printk(KERN_DEBUG); | |
632 | ||
633 | for (i = 0; i < 8; i++) | |
634 | pr_cont("%08x", apic_read(base + i*0x10)); | |
635 | ||
636 | pr_cont("\n"); | |
637 | } | |
638 | ||
/*
 * Dump the calling CPU's local APIC registers to the kernel log.
 * @dummy is unused (signature required by smp_call_function_single).
 */
static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	/* 256-bit in-service / trigger-mode / request registers */
	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	/* AMD extended APIC feature/control and extended LVT entries. */
	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
745 | ||
/* Dump local APIC state of up to @maxcpu online CPUs (via IPI). */
static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}
761 | ||
/* Dump the legacy 8259 PIC registers (IMR, IRR, ISR, ELCR). */
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC  IRR: %04x\n", v);

	/* Select ISR for reading (0x0b), then restore IRR mode (0x0a). */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}
793 | ||
794 | static int show_lapic __initdata = 1; | |
795 | static __init int setup_show_lapic(char *arg) | |
796 | { | |
797 | int num = -1; | |
798 | ||
799 | if (strcmp(arg, "all") == 0) { | |
800 | show_lapic = CONFIG_NR_CPUS; | |
801 | } else { | |
802 | get_option(&arg, &num); | |
803 | if (num >= 0) | |
804 | show_lapic = num; | |
805 | } | |
806 | ||
807 | return 1; | |
808 | } | |
809 | __setup("show_lapic=", setup_show_lapic); | |
810 | ||
/* Late initcall: dump PIC, local APIC, and IO-APIC state if verbose. */
static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);