#ifndef _ASM_X86_APIC_H
#define _ASM_X86_APIC_H

#include <linux/cpumask.h>
#include <linux/pm.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/apicdef.h>
#include <linux/atomic.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/msr.h>
#include <asm/idle.h>

#define ARCH_APICTIMER_STOPS_ON_C3      1

/*
 * Debugging macros
 */
#define APIC_QUIET   0
#define APIC_VERBOSE 1
#define APIC_DEBUG   2

/* Macros for apic_extnmi which controls external NMI masking */
#define APIC_EXTNMI_BSP         0 /* Default */
#define APIC_EXTNMI_ALL         1
#define APIC_EXTNMI_NONE        2

/*
 * Define the default level of output to be very little
 * This can be turned up by using apic=verbose for more
 * information and apic=debug for _lots_ of information.
 * apic_verbosity is defined in apic.c
 */
#define apic_printk(v, s, a...) do {                            \
                if ((v) <= apic_verbosity)                      \
                        printk(s, ##a);                         \
        } while (0)

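/*
 * Example use of apic_printk() (illustrative only; the message below is
 * made up, not taken from this file): the message is emitted only when
 * apic_verbosity is at least the requested level, i.e. when apic=verbose
 * or apic=debug was given on the command line.
 *
 *      apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
 *                  APIC_BASE, apic_phys);
 */
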
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
extern void generic_apic_probe(void);
#else
static inline void generic_apic_probe(void)
{
}
#endif

#ifdef CONFIG_X86_LOCAL_APIC

extern unsigned int apic_verbosity;
extern int local_apic_timer_c2_ok;

extern int disable_apic;
extern unsigned int lapic_timer_frequency;

#ifdef CONFIG_SMP
extern void __inquire_remote_apic(int apicid);
#else /* CONFIG_SMP */
static inline void __inquire_remote_apic(int apicid)
{
}
#endif /* CONFIG_SMP */

static inline void default_inquire_remote_apic(int apicid)
{
        if (apic_verbosity >= APIC_DEBUG)
                __inquire_remote_apic(apicid);
}

/*
 * With the 82489DX we can't rely on the APIC feature bit
 * returned by CPUID, but we still have to handle such an
 * APIC chip, so we assume that an SMP configuration found
 * in the MP table implies a usable local APIC (the 64-bit
 * case mostly uses ACPI, which sets the SMP presence flag
 * as well, so this helper is safe to use there too).
 */
static inline bool apic_from_smp_config(void)
{
        return smp_found_config && !disable_apic;
}

/*
 * Basic functions accessing APICs.
 */
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

extern int setup_profiling_timer(unsigned int);

static inline void native_apic_mem_write(u32 reg, u32 v)
{
        volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg);

        alternative_io("movl %0, %P1", "xchgl %0, %P1", X86_BUG_11AP,
                       ASM_OUTPUT2("=r" (v), "=m" (*addr)),
                       ASM_OUTPUT2("0" (v), "m" (*addr)));
}

static inline u32 native_apic_mem_read(u32 reg)
{
        return *((volatile u32 *)(APIC_BASE + reg));
}

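/*
 * Illustrative note (not part of the original header): "reg" is the byte
 * offset of a register inside the APIC MMIO page, so reading the local
 * APIC version register would look like
 *
 *      u32 ver = native_apic_mem_read(APIC_LVR);
 *
 * Generic code should normally go through apic_read()/apic_write()
 * declared further below rather than calling the native helpers directly.
 */
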
extern void native_apic_wait_icr_idle(void);
extern u32 native_safe_apic_wait_icr_idle(void);
extern void native_apic_icr_write(u32 low, u32 id);
extern u64 native_apic_icr_read(void);

static inline bool apic_is_x2apic_enabled(void)
{
        u64 msr;

        if (rdmsrl_safe(MSR_IA32_APICBASE, &msr))
                return false;
        return msr & X2APIC_ENABLE;
}

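/*
 * Worked example (added for clarity, not in the original header):
 * X2APIC_ENABLE is the EXTD bit (bit 10) of the IA32_APICBASE MSR. A
 * reported value such as 0xfee00d00 has BSP (bit 8), EXTD (bit 10) and
 * EN (bit 11) set, so the helper above would return true on that CPU.
 */
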
extern void enable_IR_x2apic(void);

extern int get_physical_broadcast(void);

extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void disconnect_bsp_APIC(int virt_wire_setup);
extern void disable_local_APIC(void);
extern void lapic_shutdown(void);
extern void sync_Arb_IDs(void);
extern void init_bsp_APIC(void);
extern void setup_local_APIC(void);
extern void init_apic_mappings(void);
void register_lapic_address(unsigned long address);
extern void setup_boot_APIC_clock(void);
extern void setup_secondary_APIC_clock(void);
extern void lapic_update_tsc_freq(void);
extern int APIC_init_uniprocessor(void);

#ifdef CONFIG_X86_64
static inline int apic_force_enable(unsigned long addr)
{
        return -1;
}
#else
extern int apic_force_enable(unsigned long addr);
#endif

extern int apic_bsp_setup(bool upmode);
extern void apic_ap_setup(void);

/*
 * On 32bit this is mach-xxx local
 */
#ifdef CONFIG_X86_64
extern int apic_is_clustered_box(void);
#else
static inline int apic_is_clustered_box(void)
{
        return 0;
}
#endif

extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);

#else /* !CONFIG_X86_LOCAL_APIC */
static inline void lapic_shutdown(void) { }
#define local_apic_timer_c2_ok          1
static inline void init_apic_mappings(void) { }
static inline void disable_local_APIC(void) { }
# define setup_boot_APIC_clock x86_init_noop
# define setup_secondary_APIC_clock x86_init_noop
static inline void lapic_update_tsc_freq(void) { }
#endif /* !CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_X86_X2APIC
/*
 * Make previous memory operations globally visible before
 * sending the IPI through x2apic wrmsr. We need a serializing instruction or
 * mfence for this.
 */
static inline void x2apic_wrmsr_fence(void)
{
        asm volatile("mfence" : : : "memory");
}

static inline void native_apic_msr_write(u32 reg, u32 v)
{
        if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
            reg == APIC_LVR)
                return;

        wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0);
}

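/*
 * Worked example (added for clarity): the x2APIC exposes each register
 * at MSR 0x800 + (MMIO offset >> 4). For the ICR at MMIO offset 0x300
 * this gives APIC_BASE_MSR + (APIC_ICR >> 4) = 0x800 + 0x30 = 0x830,
 * the architectural IA32_X2APIC_ICR MSR.
 */
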
static inline void native_apic_msr_eoi_write(u32 reg, u32 v)
{
        wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
}

static inline u32 native_apic_msr_read(u32 reg)
{
        u64 msr;

        if (reg == APIC_DFR)
                return -1;

        rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
        return (u32)msr;
}

static inline void native_x2apic_wait_icr_idle(void)
{
        /* no need to wait for icr idle in x2apic */
        return;
}

static inline u32 native_safe_x2apic_wait_icr_idle(void)
{
        /* no need to wait for icr idle in x2apic */
        return 0;
}

static inline void native_x2apic_icr_write(u32 low, u32 id)
{
        wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
}

static inline u64 native_x2apic_icr_read(void)
{
        unsigned long val;

        rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
        return val;
}

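/*
 * Illustrative sketch (added for clarity; "vector" and "dest" are
 * placeholders and this is not a complete IPI implementation): an x2APIC
 * send path is expected to order prior stores before the non-serializing
 * WRMSR to the ICR, roughly:
 *
 *      x2apic_wrmsr_fence();
 *      native_x2apic_icr_write(APIC_DM_FIXED | vector, dest);
 */
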
extern int x2apic_mode;
extern int x2apic_phys;
extern void __init check_x2apic(void);
extern void x2apic_setup(void);
static inline int x2apic_enabled(void)
{
        return boot_cpu_has(X86_FEATURE_X2APIC) && apic_is_x2apic_enabled();
}

#define x2apic_supported()      (boot_cpu_has(X86_FEATURE_X2APIC))
#else /* !CONFIG_X86_X2APIC */
static inline void check_x2apic(void) { }
static inline void x2apic_setup(void) { }
static inline int x2apic_enabled(void) { return 0; }

#define x2apic_mode             (0)
#define x2apic_supported()      (0)
#endif /* !CONFIG_X86_X2APIC */

#ifdef CONFIG_X86_64
#define SET_APIC_ID(x) (apic->set_apic_id(x))
#else

#endif

/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Generic APIC sub-arch data struct.
 *
 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */
struct apic {
        char *name;

        int (*probe)(void);
        int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
        int (*apic_id_valid)(int apicid);
        int (*apic_id_registered)(void);

        u32 irq_delivery_mode;
        u32 irq_dest_mode;

        const struct cpumask *(*target_cpus)(void);

        int disable_esr;

        int dest_logical;
        unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);

        void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
                                         const struct cpumask *mask);
        void (*init_apic_ldr)(void);

        void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);

        void (*setup_apic_routing)(void);
        int (*cpu_present_to_apicid)(int mps_cpu);
        void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
        int (*check_phys_apicid_present)(int phys_apicid);
        int (*phys_pkg_id)(int cpuid_apic, int index_msb);

        unsigned int (*get_apic_id)(unsigned long x);
        unsigned long (*set_apic_id)(unsigned int id);

        int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
                                      const struct cpumask *andmask,
                                      unsigned int *apicid);

        /* ipi */
        void (*send_IPI)(int cpu, int vector);
        void (*send_IPI_mask)(const struct cpumask *mask, int vector);
        void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
                                         int vector);
        void (*send_IPI_allbutself)(int vector);
        void (*send_IPI_all)(int vector);
        void (*send_IPI_self)(int vector);

        /* wakeup_secondary_cpu */
        int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);

        void (*inquire_remote_apic)(int apicid);

        /* apic ops */
        u32 (*read)(u32 reg);
        void (*write)(u32 reg, u32 v);
        /*
         * ->eoi_write() has the same signature as ->write().
         *
         * Drivers can support both ->eoi_write() and ->write() by passing
         * the same callback value. The kernel can override ->eoi_write()
         * and fall back on ->write() for EOI.
         */
        void (*eoi_write)(u32 reg, u32 v);
        u64 (*icr_read)(void);
        void (*icr_write)(u32 low, u32 high);
        void (*wait_icr_idle)(void);
        u32 (*safe_wait_icr_idle)(void);

#ifdef CONFIG_X86_32
        /*
         * Called very early during boot from get_smp_config().  It should
         * return the logical apicid.  x86_[bios]_cpu_to_apicid is
         * initialized before this function is called.
         *
         * If logical apicid can't be determined that early, the function
         * may return BAD_APICID.  Logical apicid will be configured after
         * init_apic_ldr() while bringing up CPUs.  Note that NUMA affinity
         * won't be applied properly during early boot in this case.
         */
        int (*x86_32_early_logical_apicid)(int cpu);
#endif
};

/*
 * Pointer to the local APIC driver in use on this system (there's
 * always just one such driver in use - the kernel decides via an
 * early probing process which one it picks - and then sticks to it):
 */
extern struct apic *apic;

/*
 * APIC drivers are probed based on how they are listed in the .apicdrivers
 * section. So the order is important and enforced by the ordering
 * of the different apic driver files in the Makefile.
 *
 * For files containing two apic drivers, we use apic_drivers()
 * to enforce the order within them.
 */
#define apic_driver(sym)                                                \
        static const struct apic *__apicdrivers_##sym __used            \
        __aligned(sizeof(struct apic *))                                \
        __section(.apicdrivers) = { &sym }

#define apic_drivers(sym1, sym2)                                        \
        static struct apic *__apicdrivers_##sym1##sym2[2] __used        \
        __aligned(sizeof(struct apic *))                                \
        __section(.apicdrivers) = { &sym1, &sym2 }

extern struct apic *__apicdrivers[], *__apicdrivers_end[];

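/*
 * Illustrative sketch (hypothetical driver, not part of this header):
 * a concrete local APIC driver fills in a struct apic and registers it
 * with apic_driver(), which places a pointer in the .apicdrivers section
 * that is walked in link order during probing:
 *
 *      static struct apic apic_example = {
 *              .name           = "example",
 *              .probe          = example_probe,
 *              .read           = native_apic_mem_read,
 *              .write          = native_apic_mem_write,
 *              .eoi_write      = native_apic_mem_write,
 *              ...
 *      };
 *      apic_driver(apic_example);
 */
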
/*
 * APIC functionality to boot other CPUs - only used on SMP:
 */
#ifdef CONFIG_SMP
extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
#endif

#ifdef CONFIG_X86_LOCAL_APIC

static inline u32 apic_read(u32 reg)
{
        return apic->read(reg);
}

static inline void apic_write(u32 reg, u32 val)
{
        apic->write(reg, val);
}

static inline void apic_eoi(void)
{
        apic->eoi_write(APIC_EOI, APIC_EOI_ACK);
}

static inline u64 apic_icr_read(void)
{
        return apic->icr_read();
}

static inline void apic_icr_write(u32 low, u32 high)
{
        apic->icr_write(low, high);
}

static inline void apic_wait_icr_idle(void)
{
        apic->wait_icr_idle();
}

static inline u32 safe_apic_wait_icr_idle(void)
{
        return apic->safe_wait_icr_idle();
}

extern void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v));

#else /* CONFIG_X86_LOCAL_APIC */

static inline u32 apic_read(u32 reg) { return 0; }
static inline void apic_write(u32 reg, u32 val) { }
static inline void apic_eoi(void) { }
static inline u64 apic_icr_read(void) { return 0; }
static inline void apic_icr_write(u32 low, u32 high) { }
static inline void apic_wait_icr_idle(void) { }
static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}

#endif /* CONFIG_X86_LOCAL_APIC */

static inline void ack_APIC_irq(void)
{
        /*
         * ack_APIC_irq() actually gets compiled as a single instruction
         * ... yummie.
         */
        apic_eoi();
}

static inline unsigned default_get_apic_id(unsigned long x)
{
        unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));

        if (APIC_XAPIC(ver) || boot_cpu_has(X86_FEATURE_EXTD_APICID))
                return (x >> 24) & 0xFF;
        else
                return (x >> 24) & 0x0F;
}

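/*
 * Worked example (added for clarity): the APIC ID sits in bits 31:24 of
 * the APIC_ID register. An xAPIC (or a CPU with X86_FEATURE_EXTD_APICID)
 * keeps all eight bits, while the original local APIC has only a 4-bit
 * ID, so a raw register value of 0x17000000 decodes to 0x17 in the first
 * case and to 0x07 in the second.
 */
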
/*
 * Warm reset vector position:
 */
#define TRAMPOLINE_PHYS_LOW             0x467
#define TRAMPOLINE_PHYS_HIGH            0x469

#ifdef CONFIG_X86_64
extern void apic_send_IPI_self(int vector);

DECLARE_PER_CPU(int, x2apic_extra_bits);

extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int phys_apicid);
#endif

extern void generic_bigsmp_probe(void);


#ifdef CONFIG_X86_LOCAL_APIC

#include <asm/smp.h>

#define APIC_DFR_VALUE  (APIC_DFR_FLAT)

static inline const struct cpumask *default_target_cpus(void)
{
#ifdef CONFIG_SMP
        return cpu_online_mask;
#else
        return cpumask_of(0);
#endif
}

static inline const struct cpumask *online_target_cpus(void)
{
        return cpu_online_mask;
}

DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);


static inline unsigned int read_apic_id(void)
{
        unsigned int reg;

        reg = apic_read(APIC_ID);

        return apic->get_apic_id(reg);
}

static inline int default_apic_id_valid(int apicid)
{
        return (apicid < 255);
}

extern int default_acpi_madt_oem_check(char *, char *);

extern void default_setup_apic_routing(void);

extern struct apic apic_noop;

#ifdef CONFIG_X86_32

static inline int noop_x86_32_early_logical_apicid(int cpu)
{
        return BAD_APICID;
}

/*
 * Set up the logical destination ID.
 *
 * Intel recommends setting DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
extern void default_init_apic_ldr(void);

static inline int default_apic_id_registered(void)
{
        return physid_isset(read_apic_id(), phys_cpu_present_map);
}

static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
{
        return cpuid_apic >> index_msb;
}

#endif

static inline int
flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                            const struct cpumask *andmask,
                            unsigned int *apicid)
{
        unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
                                 cpumask_bits(andmask)[0] &
                                 cpumask_bits(cpu_online_mask)[0] &
                                 APIC_ALL_CPUS;

        if (likely(cpu_mask)) {
                *apicid = (unsigned int)cpu_mask;
                return 0;
        } else {
                return -EINVAL;
        }
}

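/*
 * Worked example (added for clarity): in flat logical mode CPU n owns
 * bit n of the 8-bit logical destination, so if cpumask and andmask both
 * contain CPUs 0 and 2 and both CPUs are online, cpu_mask ends up as
 * 0b101 = 0x05, addressing exactly those two CPUs with a single IPI.
 */
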
extern int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
                               const struct cpumask *andmask,
                               unsigned int *apicid);

static inline void
flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
                              const struct cpumask *mask)
{
        /* Careful. Some cpus do not strictly honor the set of cpus
         * specified in the interrupt destination when using lowest
         * priority interrupt delivery mode.
         *
         * In particular there was a hyperthreading cpu observed to
         * deliver interrupts to the wrong hyperthread when only one
         * hyperthread was specified in the interrupt destination.
         */
        cpumask_clear(retmask);
        cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

static inline void
default_vector_allocation_domain(int cpu, struct cpumask *retmask,
                                 const struct cpumask *mask)
{
        cpumask_copy(retmask, cpumask_of(cpu));
}

static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
{
        return physid_isset(apicid, *map);
}

static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
        *retmap = *phys_map;
}

static inline int __default_cpu_present_to_apicid(int mps_cpu)
{
        if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
                return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
        else
                return BAD_APICID;
}

static inline int
__default_check_phys_apicid_present(int phys_apicid)
{
        return physid_isset(phys_apicid, phys_cpu_present_map);
}

#ifdef CONFIG_X86_32
static inline int default_cpu_present_to_apicid(int mps_cpu)
{
        return __default_cpu_present_to_apicid(mps_cpu);
}

static inline int
default_check_phys_apicid_present(int phys_apicid)
{
        return __default_check_phys_apicid_present(phys_apicid);
}
#else
extern int default_cpu_present_to_apicid(int mps_cpu);
extern int default_check_phys_apicid_present(int phys_apicid);
#endif

#endif /* CONFIG_X86_LOCAL_APIC */
extern void irq_enter(void);
extern void irq_exit(void);

static inline void entering_irq(void)
{
        irq_enter();
        exit_idle();
}

static inline void entering_ack_irq(void)
{
        entering_irq();
        ack_APIC_irq();
}

static inline void ipi_entering_ack_irq(void)
{
        ack_APIC_irq();
        irq_enter();
}

static inline void exiting_irq(void)
{
        irq_exit();
}

static inline void exiting_ack_irq(void)
{
        irq_exit();
        /* Ack only at the end to avoid potential reentry */
        ack_APIC_irq();
}

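/*
 * Illustrative sketch (hypothetical handler and helper names, added for
 * clarity): vector handlers typically bracket their body with one of the
 * entering_*()/exiting_*() pairs above, e.g.
 *
 *      __visible void smp_example_interrupt(struct pt_regs *regs)
 *      {
 *              entering_ack_irq();
 *              example_handle_event();
 *              exiting_irq();
 *      }
 */
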
extern void ioapic_zap_locks(void);

#endif /* _ASM_X86_APIC_H */