Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/s390/kernel/smp.c | |
3 | * | |
39ce010d | 4 | * Copyright IBM Corp. 1999,2007 |
1da177e4 | 5 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), |
39ce010d HC |
6 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
7 | * Heiko Carstens (heiko.carstens@de.ibm.com) | |
1da177e4 | 8 | * |
39ce010d | 9 | * based on other smp stuff by |
1da177e4 LT |
10 | * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net> |
11 | * (c) 1998 Ingo Molnar | |
12 | * | |
13 | * We work with logical cpu numbering everywhere we can. The only | |
14 | * functions using the real cpu address (obtained from STAP) are the sigp | |
15 | * functions. For all other functions we use the identity mapping. | |
16 | * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is | |
17 | * used e.g. to find the idle task belonging to a logical cpu. Every array | |
18 | * in the kernel is sorted by the logical cpu number and not by the physical | |
19 | * one, which causes all the confusion with __cpu_logical_map and | |
20 | * cpu_number_map in other architectures. | |
21 | */ | |
22 | ||
23 | #include <linux/module.h> | |
24 | #include <linux/init.h> | |
1da177e4 LT |
25 | #include <linux/mm.h> |
26 | #include <linux/spinlock.h> | |
27 | #include <linux/kernel_stat.h> | |
1da177e4 LT |
28 | #include <linux/delay.h> |
29 | #include <linux/cache.h> | |
30 | #include <linux/interrupt.h> | |
31 | #include <linux/cpu.h> | |
2b67fc46 | 32 | #include <linux/timex.h> |
411ed322 | 33 | #include <linux/bootmem.h> |
46b05d26 | 34 | #include <asm/ipl.h> |
2b67fc46 | 35 | #include <asm/setup.h> |
1da177e4 LT |
36 | #include <asm/sigp.h> |
37 | #include <asm/pgalloc.h> | |
38 | #include <asm/irq.h> | |
39 | #include <asm/s390_ext.h> | |
40 | #include <asm/cpcmd.h> | |
41 | #include <asm/tlbflush.h> | |
2b67fc46 | 42 | #include <asm/timer.h> |
411ed322 | 43 | #include <asm/lowcore.h> |
1da177e4 | 44 | |
1da177e4 LT |
45 | /* |
46 | * An array with a pointer to the lowcore of every CPU. | |
47 | */ | |
1da177e4 | 48 | struct _lowcore *lowcore_ptr[NR_CPUS]; |
39ce010d | 49 | EXPORT_SYMBOL(lowcore_ptr); |
1da177e4 | 50 | |
255acee7 | 51 | cpumask_t cpu_online_map = CPU_MASK_NONE; |
39ce010d HC |
52 | EXPORT_SYMBOL(cpu_online_map); |
53 | ||
255acee7 | 54 | cpumask_t cpu_possible_map = CPU_MASK_NONE; |
39ce010d | 55 | EXPORT_SYMBOL(cpu_possible_map); |
1da177e4 LT |
56 | |
57 | static struct task_struct *current_set[NR_CPUS]; | |
58 | ||
1da177e4 | 59 | static void smp_ext_bitcall(int, ec_bit_sig); |
1da177e4 LT |
60 | |
61 | /* | |
63db6e8d JG |
62 | * Structure and data for __smp_call_function_map(). This is designed to |
63 | * minimise static memory requirements. It also looks cleaner. | |
1da177e4 LT |
64 | */ |
65 | static DEFINE_SPINLOCK(call_lock); | |
66 | ||
67 | struct call_data_struct { | |
68 | void (*func) (void *info); | |
69 | void *info; | |
63db6e8d JG |
70 | cpumask_t started; |
71 | cpumask_t finished; | |
1da177e4 LT |
72 | int wait; |
73 | }; | |
74 | ||
39ce010d | 75 | static struct call_data_struct *call_data; |
1da177e4 LT |
76 | |
77 | /* | |
78 | * 'Call function' interrupt callback | |
79 | */ | |
80 | static void do_call_function(void) | |
81 | { | |
82 | void (*func) (void *info) = call_data->func; | |
83 | void *info = call_data->info; | |
84 | int wait = call_data->wait; | |
85 | ||
63db6e8d | 86 | cpu_set(smp_processor_id(), call_data->started); |
1da177e4 LT |
87 | (*func)(info); |
88 | if (wait) | |
63db6e8d | 89 | cpu_set(smp_processor_id(), call_data->finished);
1da177e4 LT |
90 | } |
91 | ||
63db6e8d JG |
92 | static void __smp_call_function_map(void (*func) (void *info), void *info, |
93 | int nonatomic, int wait, cpumask_t map) | |
1da177e4 LT |
94 | { |
95 | struct call_data_struct data; | |
63db6e8d | 96 | int cpu, local = 0; |
1da177e4 | 97 | |
63db6e8d | 98 | /* |
25864162 | 99 | * Can deadlock when interrupts are disabled or if in wrong context. |
63db6e8d | 100 | */ |
25864162 | 101 | WARN_ON(irqs_disabled() || in_irq()); |
1da177e4 | 102 | |
63db6e8d JG |
103 | /* |
104 | * Check for local function call. We have to have the same call order | |
105 | * as in on_each_cpu() because of machine_restart_smp(). | |
106 | */ | |
107 | if (cpu_isset(smp_processor_id(), map)) { | |
108 | local = 1; | |
109 | cpu_clear(smp_processor_id(), map); | |
110 | } | |
111 | ||
112 | cpus_and(map, map, cpu_online_map); | |
113 | if (cpus_empty(map)) | |
114 | goto out; | |
1da177e4 LT |
115 | |
116 | data.func = func; | |
117 | data.info = info; | |
63db6e8d | 118 | data.started = CPU_MASK_NONE; |
1da177e4 LT |
119 | data.wait = wait; |
120 | if (wait) | |
63db6e8d | 121 | data.finished = CPU_MASK_NONE; |
1da177e4 | 122 | |
0ec67667 | 123 | spin_lock_bh(&call_lock); |
1da177e4 | 124 | call_data = &data; |
63db6e8d JG |
125 | |
126 | for_each_cpu_mask(cpu, map) | |
127 | smp_ext_bitcall(cpu, ec_call_function); | |
1da177e4 LT |
128 | |
129 | /* Wait for response */ | |
63db6e8d | 130 | while (!cpus_equal(map, data.started)) |
1da177e4 LT |
131 | cpu_relax(); |
132 | ||
133 | if (wait) | |
63db6e8d | 134 | while (!cpus_equal(map, data.finished)) |
1da177e4 | 135 | cpu_relax(); |
63db6e8d | 136 | |
0ec67667 | 137 | spin_unlock_bh(&call_lock); |
1da177e4 | 138 | |
63db6e8d JG |
139 | out: |
140 | local_irq_disable(); | |
141 | if (local) | |
142 | func(info); | |
143 | local_irq_enable(); | |
1da177e4 LT |
144 | } |
145 | ||
146 | /* | |
63db6e8d JG |
147 | * smp_call_function: |
148 | * @func: the function to run; this must be fast and non-blocking | |
149 | * @info: an arbitrary pointer to pass to the function | |
150 | * @nonatomic: unused | |
151 | * @wait: if true, wait (atomically) until function has completed on other CPUs | |
1da177e4 | 152 | * |
63db6e8d | 153 | * Run a function on all other CPUs. |
1da177e4 | 154 | * |
39ce010d HC |
155 | * You must not call this function with disabled interrupts, from a |
156 | * hardware interrupt handler or from a bottom half. | |
1da177e4 | 157 | */ |
63db6e8d JG |
158 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, |
159 | int wait) | |
1da177e4 | 160 | { |
63db6e8d | 161 | cpumask_t map; |
1da177e4 | 162 | |
25864162 | 163 | preempt_disable(); |
63db6e8d JG |
164 | map = cpu_online_map; |
165 | cpu_clear(smp_processor_id(), map); | |
166 | __smp_call_function_map(func, info, nonatomic, wait, map); | |
25864162 | 167 | preempt_enable(); |
63db6e8d JG |
168 | return 0; |
169 | } | |
170 | EXPORT_SYMBOL(smp_call_function); | |
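A minimal caller sketch may help here; the callback and wrapper below are hypothetical examples for illustration, not users found in this file:

```c
/* Hypothetical example: run a fast, non-blocking callback on all other CPUs. */
static void example_callback(void *info)
{
	/* must not sleep: this runs from the external call interrupt */
}

static void example_broadcast(void)
{
	/* process context, interrupts enabled; wait=1 blocks until all CPUs are done */
	smp_call_function(example_callback, NULL, 0, 1);
}
```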
1da177e4 | 171 | |
63db6e8d JG |
172 | /* |
173 | * smp_call_function_on: | |
174 | * @func: the function to run; this must be fast and non-blocking | |
175 | * @info: an arbitrary pointer to pass to the function | |
176 | * @nonatomic: unused | |
177 | * @wait: if true, wait (atomically) until function has completed on other CPUs | |
178 | * @cpu: the CPU where func should run | |
179 | * | |
180 | * Run a function on one processor. | |
181 | * | |
39ce010d HC |
182 | * You must not call this function with disabled interrupts, from a |
183 | * hardware interrupt handler or from a bottom half. | |
63db6e8d JG |
184 | */ |
185 | int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic, | |
39ce010d | 186 | int wait, int cpu) |
63db6e8d JG |
187 | { |
188 | cpumask_t map = CPU_MASK_NONE; | |
1da177e4 | 189 | |
25864162 | 190 | preempt_disable(); |
63db6e8d JG |
191 | cpu_set(cpu, map); |
192 | __smp_call_function_map(func, info, nonatomic, wait, map); | |
25864162 | 193 | preempt_enable(); |
1da177e4 LT |
194 | return 0; |
195 | } | |
196 | EXPORT_SYMBOL(smp_call_function_on); | |
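And the single-CPU variant, again as a hedged, hypothetical caller (reusing example_callback from the sketch above):

```c
/* Hypothetical example: run example_callback on logical CPU 1 and wait for it. */
static void example_single_cpu(void)
{
	smp_call_function_on(example_callback, NULL, 0, 1, 1);
}
```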
197 | ||
4d284cac | 198 | static void do_send_stop(void) |
1da177e4 | 199 | { |
39ce010d | 200 | int cpu, rc; |
1da177e4 | 201 | |
39ce010d | 202 | /* stop all processors */ |
1da177e4 LT |
203 | for_each_online_cpu(cpu) { |
204 | if (cpu == smp_processor_id()) | |
205 | continue; | |
206 | do { | |
207 | rc = signal_processor(cpu, sigp_stop); | |
208 | } while (rc == sigp_busy); | |
209 | } | |
210 | } | |
211 | ||
4d284cac | 212 | static void do_store_status(void) |
1da177e4 | 213 | { |
39ce010d | 214 | int cpu, rc; |
1da177e4 | 215 | |
39ce010d | 216 | /* store status of all processors in their lowcores (real 0) */ |
1da177e4 LT |
217 | for_each_online_cpu(cpu) { |
218 | if (cpu == smp_processor_id()) | |
219 | continue; | |
220 | do { | |
221 | rc = signal_processor_p( | |
222 | (__u32)(unsigned long) lowcore_ptr[cpu], cpu, | |
223 | sigp_store_status_at_address); | |
39ce010d HC |
224 | } while (rc == sigp_busy); |
225 | } | |
1da177e4 LT |
226 | } |
227 | ||
4d284cac | 228 | static void do_wait_for_stop(void) |
c6b5b847 HC |
229 | { |
230 | int cpu; | |
231 | ||
232 | /* Wait for all other cpus to enter stopped state */ | |
233 | for_each_online_cpu(cpu) { | |
234 | if (cpu == smp_processor_id()) | |
235 | continue; | |
39ce010d | 236 | while (!smp_cpu_not_running(cpu)) |
c6b5b847 HC |
237 | cpu_relax(); |
238 | } | |
239 | } | |
240 | ||
1da177e4 LT |
241 | /* |
242 | * this function sends a 'stop' sigp to all other CPUs in the system. | |
243 | * it goes straight through. | |
244 | */ | |
245 | void smp_send_stop(void) | |
246 | { | |
c6b5b847 | 247 | /* Disable all interrupts/machine checks */ |
c1821c2e | 248 | __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK); |
c6b5b847 | 249 | |
39ce010d | 250 | /* write magic number to zero page (absolute 0) */ |
1da177e4 LT |
251 | lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; |
252 | ||
253 | /* stop other processors. */ | |
254 | do_send_stop(); | |
255 | ||
c6b5b847 HC |
256 | /* wait until other processors are stopped */ |
257 | do_wait_for_stop(); | |
258 | ||
1da177e4 LT |
259 | /* store status of other processors. */ |
260 | do_store_status(); | |
261 | } | |
262 | ||
263 | /* | |
264 | * Reboot, halt and power_off routines for SMP. | |
265 | */ | |
39ce010d | 266 | void machine_restart_smp(char *__unused) |
1da177e4 | 267 | { |
c6b5b847 HC |
268 | smp_send_stop(); |
269 | do_reipl(); | |
1da177e4 LT |
270 | } |
271 | ||
272 | void machine_halt_smp(void) | |
273 | { | |
c6b5b847 HC |
274 | smp_send_stop(); |
275 | if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0) | |
276 | __cpcmd(vmhalt_cmd, NULL, 0, NULL); | |
277 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | |
278 | for (;;); | |
1da177e4 LT |
279 | } |
280 | ||
281 | void machine_power_off_smp(void) | |
282 | { | |
c6b5b847 HC |
283 | smp_send_stop(); |
284 | if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0) | |
285 | __cpcmd(vmpoff_cmd, NULL, 0, NULL); | |
286 | signal_processor(smp_processor_id(), sigp_stop_and_store_status); | |
287 | for (;;); | |
1da177e4 LT |
288 | } |
289 | ||
290 | /* | |
291 | * This is the main routine where commands issued by other | |
292 | * cpus are handled. | |
293 | */ | |
294 | ||
2b67fc46 | 295 | static void do_ext_call_interrupt(__u16 code) |
1da177e4 | 296 | { |
39ce010d | 297 | unsigned long bits; |
1da177e4 | 298 | |
39ce010d HC |
299 | /* |
300 | * handle bit signal external calls | |
301 | * | |
302 | * For the ec_schedule signal we have to do nothing. All the work | |
303 | * is done automatically when we return from the interrupt. | |
304 | */ | |
1da177e4 LT |
305 | bits = xchg(&S390_lowcore.ext_call_fast, 0); |
306 | ||
39ce010d | 307 | if (test_bit(ec_call_function, &bits)) |
1da177e4 LT |
308 | do_call_function(); |
309 | } | |
310 | ||
311 | /* | |
312 | * Send an external call sigp to another cpu and return without waiting | |
313 | * for its completion. | |
314 | */ | |
315 | static void smp_ext_bitcall(int cpu, ec_bit_sig sig) | |
316 | { | |
39ce010d HC |
317 | /* |
318 | * Set signaling bit in lowcore of target cpu and kick it | |
319 | */ | |
1da177e4 | 320 | set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast); |
39ce010d | 321 | while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy) |
1da177e4 LT |
322 | udelay(10); |
323 | } | |
324 | ||
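For context, a short note on how the two halves of the bit-signal mechanism fit together:

```c
/*
 * smp_ext_bitcall() above is the sending half: it sets a bit in the target
 * cpu's lowcore and raises an emergency-signal external interrupt. The
 * receiving half is do_ext_call_interrupt(), where
 * xchg(&S390_lowcore.ext_call_fast, 0) atomically claims all pending bits.
 */
```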
347a8dc3 | 325 | #ifndef CONFIG_64BIT |
1da177e4 LT |
326 | /* |
327 | * Callback to flush the local TLB; smp_ptlb_all() below runs it on every CPU. | |
328 | */ | |
329 | void smp_ptlb_callback(void *info) | |
330 | { | |
331 | local_flush_tlb(); | |
332 | } | |
333 | ||
334 | void smp_ptlb_all(void) | |
335 | { | |
39ce010d | 336 | on_each_cpu(smp_ptlb_callback, NULL, 0, 1); |
1da177e4 LT |
337 | } |
338 | EXPORT_SYMBOL(smp_ptlb_all); | |
347a8dc3 | 339 | #endif /* ! CONFIG_64BIT */ |
1da177e4 LT |
340 | |
341 | /* | |
342 | * this function sends a 'reschedule' IPI to another CPU. | |
343 | * it goes straight through and wastes no time serializing | |
344 | * anything. Worst case is that we lose a reschedule ... | |
345 | */ | |
346 | void smp_send_reschedule(int cpu) | |
347 | { | |
39ce010d | 348 | smp_ext_bitcall(cpu, ec_schedule); |
1da177e4 LT |
349 | } |
350 | ||
351 | /* | |
352 | * parameter area for the set/clear control bit callbacks | |
353 | */ | |
94c12cc7 | 354 | struct ec_creg_mask_parms { |
1da177e4 LT |
355 | unsigned long orvals[16]; |
356 | unsigned long andvals[16]; | |
94c12cc7 | 357 | }; |
1da177e4 LT |
358 | |
359 | /* | |
360 | * callback for setting/clearing control bits | |
361 | */ | |
39ce010d HC |
362 | static void smp_ctl_bit_callback(void *info) |
363 | { | |
94c12cc7 | 364 | struct ec_creg_mask_parms *pp = info; |
1da177e4 LT |
365 | unsigned long cregs[16]; |
366 | int i; | |
39ce010d | 367 | |
94c12cc7 MS |
368 | __ctl_store(cregs, 0, 15); |
369 | for (i = 0; i <= 15; i++) | |
1da177e4 | 370 | cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i]; |
94c12cc7 | 371 | __ctl_load(cregs, 0, 15); |
1da177e4 LT |
372 | } |
373 | ||
374 | /* | |
375 | * Set a bit in a control register of all cpus | |
376 | */ | |
94c12cc7 MS |
377 | void smp_ctl_set_bit(int cr, int bit) |
378 | { | |
379 | struct ec_creg_mask_parms parms; | |
1da177e4 | 380 | |
94c12cc7 MS |
381 | memset(&parms.orvals, 0, sizeof(parms.orvals)); |
382 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | |
1da177e4 | 383 | parms.orvals[cr] = 1 << bit; |
94c12cc7 | 384 | on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); |
1da177e4 | 385 | } |
39ce010d | 386 | EXPORT_SYMBOL(smp_ctl_set_bit); |
1da177e4 LT |
387 | |
388 | /* | |
389 | * Clear a bit in a control register of all cpus | |
390 | */ | |
94c12cc7 MS |
391 | void smp_ctl_clear_bit(int cr, int bit) |
392 | { | |
393 | struct ec_creg_mask_parms parms; | |
1da177e4 | 394 | |
94c12cc7 MS |
395 | memset(&parms.orvals, 0, sizeof(parms.orvals)); |
396 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | |
1da177e4 | 397 | parms.andvals[cr] = ~(1L << bit); |
94c12cc7 | 398 | on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); |
1da177e4 | 399 | } |
39ce010d | 400 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
1da177e4 | 401 | |
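A short worked example of the or/and mask arithmetic used by smp_ctl_bit_callback(), kept as a comment since it only restates what the helpers above do:

```c
/*
 * smp_ctl_set_bit(0, 13) builds parms with orvals[0] = 1 << 13 and all
 * andvals bytes set to 0xff (i.e. ~0UL per word), so each cpu executes
 *     cr0 = (cr0 & ~0UL) | (1 << 13);     // set bit 13, keep the rest
 * smp_ctl_clear_bit(0, 13) instead sets andvals[0] = ~(1L << 13) with all
 * orvals zero, giving
 *     cr0 = (cr0 & ~(1L << 13)) | 0;      // clear bit 13, keep the rest
 */
```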
411ed322 MH |
402 | #if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE) |
403 | ||
404 | /* | |
405 | * zfcpdump_prefix_array holds prefix registers for the following scenario: | |
406 | * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to | |
407 | * save its prefix registers, since they get lost, when switching from 31 bit | |
408 | * to 64 bit. | |
409 | */ | |
410 | unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \ | |
411 | __attribute__((__section__(".data"))); | |
412 | ||
285f6722 | 413 | static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) |
411ed322 | 414 | { |
411ed322 MH |
415 | if (ipl_info.type != IPL_TYPE_FCP_DUMP) |
416 | return; | |
285f6722 HC |
417 | if (cpu >= NR_CPUS) { |
418 | printk(KERN_WARNING "Registers for cpu %i not saved since dump " | |
419 | "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS); | |
420 | return; | |
411ed322 | 421 | } |
285f6722 HC |
422 | zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area)); |
423 | __cpu_logical_map[1] = (__u16) phy_cpu; | |
424 | while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy) | |
425 | cpu_relax(); | |
426 | memcpy(zfcpdump_save_areas[cpu], | |
427 | (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE, | |
428 | SAVE_AREA_SIZE); | |
429 | #ifdef CONFIG_64BIT | |
430 | /* copy original prefix register */ | |
431 | zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu]; | |
432 | #endif | |
411ed322 MH |
433 | } |
434 | ||
435 | union save_area *zfcpdump_save_areas[NR_CPUS + 1]; | |
436 | EXPORT_SYMBOL_GPL(zfcpdump_save_areas); | |
437 | ||
438 | #else | |
285f6722 HC |
439 | |
440 | static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { } | |
441 | ||
442 | #endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */ | |
411ed322 | 443 | |
1da177e4 LT |
444 | /* |
445 | * Let's check how many CPUs we have. | |
446 | */ | |
39ce010d | 447 | static unsigned int __init smp_count_cpus(void) |
1da177e4 | 448 | { |
255acee7 | 449 | unsigned int cpu, num_cpus; |
1da177e4 LT |
450 | __u16 boot_cpu_addr; |
451 | ||
452 | /* | |
453 | * cpu 0 is the boot cpu. See smp_prepare_boot_cpu. | |
454 | */ | |
1da177e4 LT |
455 | boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr; |
456 | current_thread_info()->cpu = 0; | |
457 | num_cpus = 1; | |
255acee7 | 458 | for (cpu = 0; cpu <= 65535; cpu++) { |
1da177e4 LT |
459 | if ((__u16) cpu == boot_cpu_addr) |
460 | continue; | |
255acee7 | 461 | __cpu_logical_map[1] = (__u16) cpu; |
39ce010d | 462 | if (signal_processor(1, sigp_sense) == sigp_not_operational) |
1da177e4 | 463 | continue; |
285f6722 | 464 | smp_get_save_area(num_cpus, cpu); |
1da177e4 LT |
465 | num_cpus++; |
466 | } | |
39ce010d | 467 | printk("Detected %d CPU's\n", (int) num_cpus); |
1da177e4 | 468 | printk("Boot cpu address %2X\n", boot_cpu_addr); |
255acee7 | 469 | return num_cpus; |
1da177e4 LT |
470 | } |
471 | ||
472 | /* | |
39ce010d | 473 | * Activate a secondary processor. |
1da177e4 | 474 | */ |
ea1f4eec | 475 | int __cpuinit start_secondary(void *cpuvoid) |
1da177e4 | 476 | { |
39ce010d HC |
477 | /* Setup the cpu */ |
478 | cpu_init(); | |
5bfb5d69 | 479 | preempt_disable(); |
d54853ef | 480 | /* Enable TOD clock interrupts on the secondary cpu. */ |
39ce010d | 481 | init_cpu_timer(); |
1da177e4 | 482 | #ifdef CONFIG_VIRT_TIMER |
d54853ef | 483 | /* Enable cpu timer interrupts on the secondary cpu. */ |
39ce010d | 484 | init_cpu_vtimer(); |
1da177e4 | 485 | #endif |
1da177e4 | 486 | /* Enable pfault pseudo page faults on this cpu. */ |
29b08d2b HC |
487 | pfault_init(); |
488 | ||
1da177e4 LT |
489 | /* Mark this cpu as online */ |
490 | cpu_set(smp_processor_id(), cpu_online_map); | |
491 | /* Switch on interrupts */ | |
492 | local_irq_enable(); | |
39ce010d HC |
493 | /* Print info about this processor */ |
494 | print_cpu_info(&S390_lowcore.cpu_data); | |
495 | /* cpu_idle will call schedule for us */ | |
496 | cpu_idle(); | |
497 | return 0; | |
1da177e4 LT |
498 | } |
499 | ||
500 | static void __init smp_create_idle(unsigned int cpu) | |
501 | { | |
502 | struct task_struct *p; | |
503 | ||
504 | /* | |
505 | * don't care about the psw and regs settings since we'll never | |
506 | * reschedule the forked task. | |
507 | */ | |
508 | p = fork_idle(cpu); | |
509 | if (IS_ERR(p)) | |
510 | panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); | |
511 | current_set[cpu] = p; | |
512 | } | |
513 | ||
39ce010d | 514 | static int cpu_stopped(int cpu) |
1da177e4 LT |
515 | { |
516 | __u32 status; | |
517 | ||
518 | /* Check for stopped state */ | |
39ce010d HC |
519 | if (signal_processor_ps(&status, 0, cpu, sigp_sense) == |
520 | sigp_status_stored) { | |
1da177e4 LT |
521 | if (status & 0x40) |
522 | return 1; | |
523 | } | |
524 | return 0; | |
525 | } | |
526 | ||
527 | /* Upping and downing of CPUs */ | |
528 | ||
39ce010d | 529 | int __cpu_up(unsigned int cpu) |
1da177e4 LT |
530 | { |
531 | struct task_struct *idle; | |
39ce010d | 532 | struct _lowcore *cpu_lowcore; |
1da177e4 | 533 | struct stack_frame *sf; |
39ce010d HC |
534 | sigp_ccode ccode; |
535 | int curr_cpu; | |
1da177e4 LT |
536 | |
537 | for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) { | |
538 | __cpu_logical_map[cpu] = (__u16) curr_cpu; | |
539 | if (cpu_stopped(cpu)) | |
540 | break; | |
541 | } | |
542 | ||
543 | if (!cpu_stopped(cpu)) | |
544 | return -ENODEV; | |
545 | ||
546 | ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]), | |
547 | cpu, sigp_set_prefix); | |
39ce010d | 548 | if (ccode) { |
1da177e4 LT |
549 | printk("sigp_set_prefix failed for cpu %d " |
550 | "with condition code %d\n", | |
551 | (int) cpu, (int) ccode); | |
552 | return -EIO; | |
553 | } | |
554 | ||
555 | idle = current_set[cpu]; | |
39ce010d | 556 | cpu_lowcore = lowcore_ptr[cpu]; |
1da177e4 | 557 | cpu_lowcore->kernel_stack = (unsigned long) |
39ce010d | 558 | task_stack_page(idle) + THREAD_SIZE; |
1da177e4 LT |
559 | sf = (struct stack_frame *) (cpu_lowcore->kernel_stack |
560 | - sizeof(struct pt_regs) | |
561 | - sizeof(struct stack_frame)); | |
562 | memset(sf, 0, sizeof(struct stack_frame)); | |
563 | sf->gprs[9] = (unsigned long) sf; | |
564 | cpu_lowcore->save_area[15] = (unsigned long) sf; | |
565 | __ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15); | |
94c12cc7 MS |
566 | asm volatile( |
567 | " stam 0,15,0(%0)" | |
568 | : : "a" (&cpu_lowcore->access_regs_save_area) : "memory"); | |
1da177e4 | 569 | cpu_lowcore->percpu_offset = __per_cpu_offset[cpu]; |
39ce010d HC |
570 | cpu_lowcore->current_task = (unsigned long) idle; |
571 | cpu_lowcore->cpu_data.cpu_nr = cpu; | |
1da177e4 | 572 | eieio(); |
699ff13f | 573 | |
39ce010d | 574 | while (signal_processor(cpu, sigp_restart) == sigp_busy) |
699ff13f | 575 | udelay(10); |
1da177e4 LT |
576 | |
577 | while (!cpu_online(cpu)) | |
578 | cpu_relax(); | |
579 | return 0; | |
580 | } | |
581 | ||
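A note on the bring-up handshake implemented by the two functions above:

```c
/*
 * __cpu_up() points the new cpu's lowcore at its idle task and kernel stack,
 * kicks the cpu with sigp_restart and then spins until cpu_online(cpu) is
 * true. That bit is set by the target itself once it runs start_secondary()
 * and executes cpu_set(smp_processor_id(), cpu_online_map).
 */
```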
255acee7 | 582 | static unsigned int __initdata additional_cpus; |
37a33026 | 583 | static unsigned int __initdata possible_cpus; |
255acee7 HC |
584 | |
585 | void __init smp_setup_cpu_possible_map(void) | |
586 | { | |
54330456 | 587 | unsigned int phy_cpus, pos_cpus, cpu; |
255acee7 | 588 | |
54330456 HC |
589 | phy_cpus = smp_count_cpus(); |
590 | pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS); | |
255acee7 | 591 | |
37a33026 | 592 | if (possible_cpus) |
54330456 | 593 | pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS); |
255acee7 | 594 | |
54330456 | 595 | for (cpu = 0; cpu < pos_cpus; cpu++) |
255acee7 HC |
596 | cpu_set(cpu, cpu_possible_map); |
597 | ||
54330456 HC |
598 | phy_cpus = min(phy_cpus, pos_cpus); |
599 | ||
600 | for (cpu = 0; cpu < phy_cpus; cpu++) | |
601 | cpu_set(cpu, cpu_present_map); | |
255acee7 HC |
602 | } |
603 | ||
604 | #ifdef CONFIG_HOTPLUG_CPU | |
605 | ||
606 | static int __init setup_additional_cpus(char *s) | |
607 | { | |
608 | additional_cpus = simple_strtoul(s, NULL, 0); | |
609 | return 0; | |
610 | } | |
611 | early_param("additional_cpus", setup_additional_cpus); | |
612 | ||
37a33026 HC |
613 | static int __init setup_possible_cpus(char *s) |
614 | { | |
615 | possible_cpus = simple_strtoul(s, NULL, 0); | |
616 | return 0; | |
617 | } | |
618 | early_param("possible_cpus", setup_possible_cpus); | |
619 | ||
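A hedged usage note for the two boot parameters registered above (only available with CONFIG_HOTPLUG_CPU):

```c
/*
 * Booting with "additional_cpus=2" reserves two extra possible cpus beyond
 * those detected by smp_count_cpus(); "possible_cpus=4" overrides the
 * computed value entirely. Both results are capped at NR_CPUS by
 * smp_setup_cpu_possible_map().
 */
```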
39ce010d | 620 | int __cpu_disable(void) |
1da177e4 | 621 | { |
94c12cc7 | 622 | struct ec_creg_mask_parms cr_parms; |
f3705136 | 623 | int cpu = smp_processor_id(); |
1da177e4 | 624 | |
f3705136 | 625 | cpu_clear(cpu, cpu_online_map); |
1da177e4 | 626 | |
1da177e4 | 627 | /* Disable pfault pseudo page faults on this cpu. */ |
29b08d2b | 628 | pfault_fini(); |
1da177e4 | 629 | |
94c12cc7 MS |
630 | memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals)); |
631 | memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals)); | |
1da177e4 | 632 | |
94c12cc7 | 633 | /* disable all external interrupts */ |
1da177e4 | 634 | cr_parms.orvals[0] = 0; |
39ce010d HC |
635 | cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 | |
636 | 1 << 11 | 1 << 10 | 1 << 6 | 1 << 4); | |
1da177e4 | 637 | /* disable all I/O interrupts */ |
1da177e4 | 638 | cr_parms.orvals[6] = 0; |
39ce010d HC |
639 | cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 | |
640 | 1 << 27 | 1 << 26 | 1 << 25 | 1 << 24); | |
1da177e4 | 641 | /* disable most machine checks */ |
1da177e4 | 642 | cr_parms.orvals[14] = 0; |
39ce010d HC |
643 | cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 | |
644 | 1 << 25 | 1 << 24); | |
94c12cc7 | 645 | |
1da177e4 LT |
646 | smp_ctl_bit_callback(&cr_parms); |
647 | ||
1da177e4 LT |
648 | return 0; |
649 | } | |
650 | ||
39ce010d | 651 | void __cpu_die(unsigned int cpu) |
1da177e4 LT |
652 | { |
653 | /* Wait until target cpu is down */ | |
654 | while (!smp_cpu_not_running(cpu)) | |
655 | cpu_relax(); | |
656 | printk("Processor %d spun down\n", cpu); | |
657 | } | |
658 | ||
39ce010d | 659 | void cpu_die(void) |
1da177e4 LT |
660 | { |
661 | idle_task_exit(); | |
662 | signal_processor(smp_processor_id(), sigp_stop); | |
663 | BUG(); | |
39ce010d | 664 | for (;;); |
1da177e4 LT |
665 | } |
666 | ||
255acee7 HC |
667 | #endif /* CONFIG_HOTPLUG_CPU */ |
668 | ||
1da177e4 LT |
669 | /* |
670 | * Cycle through the processors and setup structures. | |
671 | */ | |
672 | ||
673 | void __init smp_prepare_cpus(unsigned int max_cpus) | |
674 | { | |
675 | unsigned long stack; | |
676 | unsigned int cpu; | |
39ce010d HC |
677 | int i; |
678 | ||
679 | /* request the 0x1201 emergency signal external interrupt */ | |
680 | if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0) | |
681 | panic("Couldn't request external interrupt 0x1201"); | |
682 | memset(lowcore_ptr, 0, sizeof(lowcore_ptr)); | |
683 | /* | |
684 | * Initialize prefix pages and stacks for all possible cpus | |
685 | */ | |
1da177e4 LT |
686 | print_cpu_info(&S390_lowcore.cpu_data); |
687 | ||
39ce010d | 688 | for_each_possible_cpu(i) { |
1da177e4 | 689 | lowcore_ptr[i] = (struct _lowcore *) |
39ce010d HC |
690 | __get_free_pages(GFP_KERNEL | GFP_DMA, |
691 | sizeof(void*) == 8 ? 1 : 0); | |
692 | stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); | |
693 | if (!lowcore_ptr[i] || !stack) | |
1da177e4 LT |
694 | panic("smp_boot_cpus failed to allocate memory\n"); |
695 | ||
696 | *(lowcore_ptr[i]) = S390_lowcore; | |
39ce010d HC |
697 | lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE; |
698 | stack = __get_free_pages(GFP_KERNEL, 0); | |
699 | if (!stack) | |
1da177e4 | 700 | panic("smp_boot_cpus failed to allocate memory\n"); |
39ce010d | 701 | lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE; |
347a8dc3 | 702 | #ifndef CONFIG_64BIT |
77fa2245 HC |
703 | if (MACHINE_HAS_IEEE) { |
704 | lowcore_ptr[i]->extended_save_area_addr = | |
39ce010d HC |
705 | (__u32) __get_free_pages(GFP_KERNEL, 0); |
706 | if (!lowcore_ptr[i]->extended_save_area_addr) | |
77fa2245 HC |
707 | panic("smp_boot_cpus failed to " |
708 | "allocate memory\n"); | |
709 | } | |
1da177e4 LT |
710 | #endif |
711 | } | |
347a8dc3 | 712 | #ifndef CONFIG_64BIT |
77fa2245 HC |
713 | if (MACHINE_HAS_IEEE) |
714 | ctl_set_bit(14, 29); /* enable extended save area */ | |
715 | #endif | |
1da177e4 LT |
716 | set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]); |
717 | ||
97db7fbf | 718 | for_each_possible_cpu(cpu) |
1da177e4 LT |
719 | if (cpu != smp_processor_id()) |
720 | smp_create_idle(cpu); | |
721 | } | |
722 | ||
ea1f4eec | 723 | void __init smp_prepare_boot_cpu(void) |
1da177e4 LT |
724 | { |
725 | BUG_ON(smp_processor_id() != 0); | |
726 | ||
727 | cpu_set(0, cpu_online_map); | |
1da177e4 LT |
728 | S390_lowcore.percpu_offset = __per_cpu_offset[0]; |
729 | current_set[0] = current; | |
730 | } | |
731 | ||
ea1f4eec | 732 | void __init smp_cpus_done(unsigned int max_cpus) |
1da177e4 | 733 | { |
54330456 | 734 | cpu_present_map = cpu_possible_map; |
1da177e4 LT |
735 | } |
736 | ||
737 | /* | |
738 | * the frequency of the profiling timer can be changed | |
739 | * by writing a multiplier value into /proc/profile. | |
740 | * | |
741 | * usually you want to run this on all CPUs ;) | |
742 | */ | |
743 | int setup_profiling_timer(unsigned int multiplier) | |
744 | { | |
39ce010d | 745 | return 0; |
1da177e4 LT |
746 | } |
747 | ||
748 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | |
749 | ||
2fc2d1e9 HC |
750 | static ssize_t show_capability(struct sys_device *dev, char *buf) |
751 | { | |
752 | unsigned int capability; | |
753 | int rc; | |
754 | ||
755 | rc = get_cpu_capability(&capability); | |
756 | if (rc) | |
757 | return rc; | |
758 | return sprintf(buf, "%u\n", capability); | |
759 | } | |
760 | static SYSDEV_ATTR(capability, 0444, show_capability, NULL); | |
761 | ||
762 | static int __cpuinit smp_cpu_notify(struct notifier_block *self, | |
763 | unsigned long action, void *hcpu) | |
764 | { | |
765 | unsigned int cpu = (unsigned int)(long)hcpu; | |
766 | struct cpu *c = &per_cpu(cpu_devices, cpu); | |
767 | struct sys_device *s = &c->sysdev; | |
768 | ||
769 | switch (action) { | |
770 | case CPU_ONLINE: | |
8bb78442 | 771 | case CPU_ONLINE_FROZEN: |
2fc2d1e9 HC |
772 | if (sysdev_create_file(s, &attr_capability)) |
773 | return NOTIFY_BAD; | |
774 | break; | |
775 | case CPU_DEAD: | |
8bb78442 | 776 | case CPU_DEAD_FROZEN: |
2fc2d1e9 HC |
777 | sysdev_remove_file(s, &attr_capability); |
778 | break; | |
779 | } | |
780 | return NOTIFY_OK; | |
781 | } | |
782 | ||
783 | static struct notifier_block __cpuinitdata smp_cpu_nb = { | |
39ce010d | 784 | .notifier_call = smp_cpu_notify, |
2fc2d1e9 HC |
785 | }; |
786 | ||
1da177e4 LT |
787 | static int __init topology_init(void) |
788 | { | |
789 | int cpu; | |
2fc2d1e9 HC |
790 | |
791 | register_cpu_notifier(&smp_cpu_nb); | |
1da177e4 | 792 | |
97db7fbf | 793 | for_each_possible_cpu(cpu) { |
6721f778 | 794 | struct cpu *c = &per_cpu(cpu_devices, cpu); |
2fc2d1e9 | 795 | struct sys_device *s = &c->sysdev; |
6721f778 HC |
796 | |
797 | c->hotpluggable = 1; | |
2fc2d1e9 HC |
798 | register_cpu(c, cpu); |
799 | if (!cpu_online(cpu)) | |
800 | continue; | |
801 | s = &c->sysdev; | |
802 | sysdev_create_file(s, &attr_capability); | |
1da177e4 LT |
803 | } |
804 | return 0; | |
805 | } | |
1da177e4 | 806 | subsys_initcall(topology_init); |