powerpc: Add smp_generic_cpu_bootable
arch/powerpc/include/asm/smp.h
/*
 * smp.h: PowerPC-specific SMP code.
 *
 * Original was a copy of sparc smp.h.  Now heavily modified
 * for PPC.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996-2001 Cort Dougan <cort@fsmlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_POWERPC_SMP_H
#define _ASM_POWERPC_SMP_H
#ifdef __KERNEL__

#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/irqreturn.h>

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
#include <asm/percpu.h>

extern int boot_cpuid;
extern int spinning_secondaries;

extern void cpu_die(void);

#ifdef CONFIG_SMP

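/*
 * Platform-specific SMP callbacks; the platform setup code installs its
 * own table through the smp_ops pointer declared later in this header.
 * cpu_bootable, when provided, lets the platform veto onlining a given
 * hardware thread; smp_generic_cpu_bootable() below is a generic version
 * platforms can point this hook at.
 */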
struct smp_ops_t {
        void  (*message_pass)(int cpu, int msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
        void  (*cause_ipi)(int cpu, unsigned long data);
#endif
        int   (*probe)(void);
        int   (*kick_cpu)(int nr);
        void  (*setup_cpu)(int nr);
        void  (*bringup_done)(void);
        void  (*take_timebase)(void);
        void  (*give_timebase)(void);
        int   (*cpu_disable)(void);
        void  (*cpu_die)(unsigned int nr);
        int   (*cpu_bootable)(unsigned int nr);
};

extern void smp_send_debugger_break(void);
extern void start_secondary_resume(void);
extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);

DECLARE_PER_CPU(unsigned int, cpu_pvr);

#ifdef CONFIG_HOTPLUG_CPU
extern void migrate_irqs(void);
int generic_cpu_disable(void);
void generic_cpu_die(unsigned int cpu);
void generic_mach_cpu_die(void);
void generic_set_cpu_dead(unsigned int cpu);
void generic_set_cpu_up(unsigned int cpu);
int generic_check_cpu_restart(unsigned int cpu);

extern void inhibit_secondary_onlining(void);
extern void uninhibit_secondary_onlining(void);

#else /* HOTPLUG_CPU */
static inline void inhibit_secondary_onlining(void) {}
static inline void uninhibit_secondary_onlining(void) {}

#endif

#ifdef CONFIG_PPC64
#define raw_smp_processor_id() (local_paca->paca_index)
#define hard_smp_processor_id() (get_paca()->hw_cpu_id)
#else
/* 32-bit */
extern int smp_hw_index[];

#define raw_smp_processor_id() (current_thread_info()->cpu)
#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])

static inline int get_hard_smp_processor_id(int cpu)
{
        return smp_hw_index[cpu];
}

static inline void set_hard_smp_processor_id(int cpu, int phys)
{
        smp_hw_index[cpu] = phys;
}
#endif

DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);

static inline struct cpumask *cpu_sibling_mask(int cpu)
{
        return per_cpu(cpu_sibling_map, cpu);
}

static inline struct cpumask *cpu_core_mask(int cpu)
{
        return per_cpu(cpu_core_map, cpu);
}

extern int cpu_to_core_id(int cpu);

/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
 *
 * Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
 * in /proc/interrupts will be wrong!!! --Troy */
#define PPC_MSG_CALL_FUNCTION    0
#define PPC_MSG_RESCHEDULE       1
#define PPC_MSG_CALL_FUNC_SINGLE 2
#define PPC_MSG_DEBUGGER_BREAK   3

/* for irq controllers that have dedicated ipis per message (4) */
extern int smp_request_message_ipi(int virq, int message);
extern const char *smp_ipi_name[];

/* for irq controllers with only a single ipi */
extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
extern void smp_muxed_ipi_message_pass(int cpu, int msg);
extern irqreturn_t smp_ipi_demux(void);

void smp_init_pSeries(void);
void smp_init_cell(void);
void smp_init_celleb(void);
void smp_setup_cpu_maps(void);

extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);

#else
/* for UP */
#define hard_smp_processor_id() get_hard_smp_processor_id(0)
#define smp_setup_cpu_maps()
static inline void inhibit_secondary_onlining(void) {}
static inline void uninhibit_secondary_onlining(void) {}
static inline const struct cpumask *cpu_sibling_mask(int cpu)
{
        return cpumask_of(cpu);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC64
static inline int get_hard_smp_processor_id(int cpu)
{
        return paca[cpu].hw_cpu_id;
}

static inline void set_hard_smp_processor_id(int cpu, int phys)
{
        paca[cpu].hw_cpu_id = phys;
}

extern void smp_release_cpus(void);

#else
/* 32-bit */
#ifndef CONFIG_SMP
extern int boot_cpuid_phys;
static inline int get_hard_smp_processor_id(int cpu)
{
        return boot_cpuid_phys;
}

static inline void set_hard_smp_processor_id(int cpu, int phys)
{
        boot_cpuid_phys = phys;
}
#endif /* !CONFIG_SMP */
#endif /* !CONFIG_PPC64 */

extern int smt_enabled_at_boot;

extern int smp_mpic_probe(void);
extern void smp_mpic_setup_cpu(int cpu);
extern int smp_generic_kick_cpu(int nr);
extern int smp_generic_cpu_bootable(unsigned int nr);

extern void smp_generic_give_timebase(void);
extern void smp_generic_take_timebase(void);

extern struct smp_ops_t *smp_ops;

extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

/* Definitions relative to the secondary CPU spin loop
 * and entry point. Not all of them exist on both 32 and
 * 64-bit but defining them all here doesn't harm
 */
extern void generic_secondary_smp_init(void);
extern void generic_secondary_thread_init(void);
extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;

extern void __early_start(void);
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SMP_H */
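The header above only declares smp_generic_cpu_bootable(); its body is not part of this page (it presumably lives in arch/powerpc/kernel/smp.c). As a rough, non-authoritative sketch, assuming the usual kernel helpers system_state/SYSTEM_BOOTING (linux/kernel.h), cpu_has_feature()/CPU_FTR_SMT (asm/cputable.h) and cpu_thread_in_core() (asm/cputhreads.h) together with smt_enabled_at_boot declared above, a generic cpu_bootable hook would gate secondary-thread startup during boot along these lines:

/* Sketch only - not taken from this header; names outside this file are
 * assumptions about standard powerpc kernel helpers.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
        /* During boot, honour the smt-enabled= setting by refusing to
         * start secondary threads beyond the requested thread count.
         */
        if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
                if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
                        return 0;
                if (smt_enabled_at_boot &&
                    cpu_thread_in_core(nr) >= smt_enabled_at_boot)
                        return 0;
        }

        return 1;
}

A platform would then simply set .cpu_bootable = smp_generic_cpu_bootable in its smp_ops_t table instead of carrying its own copy of this check.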