Pull bugfix into test branch
[deliverable/linux.git] / include / asm-x86_64 / smp.h
1 #ifndef __ASM_SMP_H
2 #define __ASM_SMP_H
3
4 /*
5 * We need the APIC definitions automatically as part of 'smp.h'
6 */
7 #include <linux/threads.h>
8 #include <linux/cpumask.h>
9 #include <linux/bitops.h>
10 extern int disable_apic;
11
12 #include <asm/fixmap.h>
13 #include <asm/mpspec.h>
14 #include <asm/io_apic.h>
15 #include <asm/apic.h>
16 #include <asm/thread_info.h>
17
18 #ifdef CONFIG_SMP
19
20 #include <asm/pda.h>
21
22 struct pt_regs;
23
24 extern cpumask_t cpu_present_mask;
25 extern cpumask_t cpu_possible_map;
26 extern cpumask_t cpu_online_map;
27 extern cpumask_t cpu_callout_map;
28 extern cpumask_t cpu_initialized;
29
30 /*
31 * Private routines/data
32 */
33
34 extern void smp_alloc_memory(void);
35 extern volatile unsigned long smp_invalidate_needed;
36 extern void lock_ipi_call_lock(void);
37 extern void unlock_ipi_call_lock(void);
38 extern int smp_num_siblings;
39 extern void smp_send_reschedule(int cpu);
40 void smp_stop_cpu(void);
41
42 extern cpumask_t cpu_sibling_map[NR_CPUS];
43 extern cpumask_t cpu_core_map[NR_CPUS];
44 extern u8 cpu_llc_id[NR_CPUS];
45
46 #define SMP_TRAMPOLINE_BASE 0x6000
47
48 /*
49 * On x86 all CPUs are mapped 1:1 to the APIC space.
50 * This simplifies scheduling and IPI sending and
51 * compresses data structures.
52 */
53
54 static inline int num_booting_cpus(void)
55 {
56 return cpus_weight(cpu_callout_map);
57 }
58
59 #define raw_smp_processor_id() read_pda(cpunumber)
60
/*
 * Read this CPU's physical APIC ID directly from the memory-mapped
 * local APIC ID register (APIC_BASE + APIC_ID), independent of any
 * per-CPU kernel bookkeeping.
 */
static inline int hard_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
}
66
67 extern int __cpu_disable(void);
68 extern void __cpu_die(unsigned int cpu);
69 extern void prefill_possible_map(void);
70 extern unsigned num_processors;
71 extern unsigned disabled_cpus;
72
73 #define NO_PROC_ID 0xFF /* No processor magic marker */
74
75 #endif
76
77 /*
78 * Some lowlevel functions might want to know about
79 * the real APIC ID <-> CPU # mapping.
80 */
81 extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
82 extern u8 x86_cpu_to_log_apicid[NR_CPUS];
83 extern u8 bios_cpu_apicid[];
84
85 static inline int cpu_present_to_apicid(int mps_cpu)
86 {
87 if (mps_cpu < NR_CPUS)
88 return (int)bios_cpu_apicid[mps_cpu];
89 else
90 return BAD_APICID;
91 }
92
#ifndef CONFIG_SMP
/* Uniprocessor build: only CPU 0 exists, logical == physical numbering. */
#define stack_smp_processor_id() 0
#define cpu_logical_map(x) (x)
#else
#include <asm/thread_info.h>
/*
 * Derive the current CPU number from the kernel stack pointer: mask
 * %rsp with CURRENT_MASK to locate the thread_info at the base of the
 * stack, then read its cpu field.
 * NOTE(review): presumably this exists as an alternative to
 * raw_smp_processor_id() for contexts where the PDA is not usable --
 * confirm against callers.
 */
#define stack_smp_processor_id() \
({ 								\
	struct thread_info *ti;					\
	__asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
	ti->cpu;						\
})
#endif
105
/*
 * Read this CPU's logical APIC ID directly from the memory-mapped
 * local APIC logical destination register (APIC_BASE + APIC_LDR).
 */
static __inline int logical_smp_processor_id(void)
{
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
}
111
112 #ifdef CONFIG_SMP
113 #define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
114 #else
115 #define cpu_physical_id(cpu) boot_cpu_id
116 #endif /* !CONFIG_SMP */
117 #endif
118
This page took 0.034896 seconds and 6 git commands to generate.