/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>

#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>

extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */

struct _lowcore *lowcore_ptr[NR_CPUS];

cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_possible_map = CPU_MASK_NONE;

static struct task_struct *current_set[NR_CPUS];

/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

extern void reipl(unsigned long devno);
extern void reipl_diag(void);

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	atomic_inc(&call_data->started);
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus = num_online_cpus()-1;

	if (cpus <= 0)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ext_bitcall_others(ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}

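/*
 * Usage sketch (illustrative only, not part of the original file): run a
 * quick, non-blocking helper on every other CPU and wait for it to finish,
 * mirroring the pattern used by smp_ctl_set_bit() below. The helper name
 * my_drain_local_queue() is purely hypothetical.
 *
 *	static void my_drain_local_queue(void *unused)
 *	{
 *		// drain this CPU's private queue; must not sleep
 *	}
 *
 *	preempt_disable();
 *	smp_call_function(my_drain_local_queue, NULL, 0, 1);
 *	my_drain_local_queue(NULL);	// the calling CPU is not included
 *	preempt_enable();
 */
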
/*
 * Call a function on one CPU
 * cpu : the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler. You may call it from a bottom half.
 *
 * It is guaranteed that the called function runs on the specified CPU;
 * preemption is disabled while it does.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
			 int nonatomic, int wait, int cpu)
{
	struct call_data_struct data;
	int curr_cpu;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* disable preemption for local function call */
	curr_cpu = get_cpu();

	if (curr_cpu == cpu) {
		/* direct call to function */
		func(info);
		put_cpu();
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != 1)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != 1)
			cpu_relax();

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_on);
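
/*
 * Usage sketch (illustrative only, not part of the original file): push a
 * short piece of work to one particular online CPU and wait for it, e.g.
 * from a driver that keeps per-CPU state. my_read_counters() is a
 * hypothetical helper.
 *
 *	static void my_read_counters(void *data)
 *	{
 *		// runs on the target CPU; must be fast and non-blocking
 *	}
 *
 *	rc = smp_call_function_on(my_read_counters, NULL, 0, 1, cpu);
 *	if (rc)
 *		// -EINVAL: the requested cpu was not online
 */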

static inline void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static inline void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while(rc == sigp_busy);
	}
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */

static void do_machine_restart(void * __unused)
{
	int cpu;
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
		signal_processor(smp_processor_id(), sigp_stop);

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while(!smp_cpu_not_running(cpu))
			cpu_relax();
	}

	/* Store status of other cpus. */
	do_store_status();

	/*
	 * Finally call reipl. Because we waited for all other
	 * cpus to enter this function we know that they do
	 * not hold any s390irq-locks (the cpus have been
	 * interrupted by an external interrupt and s390irq
	 * locks are always held disabled).
	 */
	reipl_diag();

	if (MACHINE_IS_VM)
		cpcmd ("IPL", NULL, 0, NULL);
	else
		reipl (0x10000 | S390_lowcore.ipl_device);
}

void machine_restart_smp(char * __unused)
{
	on_each_cpu(do_machine_restart, NULL, 0, 0);
}

static void do_wait_for_stop(void)
{
	unsigned long cr[16];

	__ctl_store(cr, 0, 15);
	cr[0] &= ~0xffff;
	cr[6] = 0;
	__ctl_load(cr, 0, 15);
	for (;;)
		enabled_wait();
}

static void do_machine_halt(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
			cpcmd(vmhalt_cmd, NULL, 0, NULL);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_halt_smp(void)
{
	on_each_cpu(do_machine_halt, NULL, 0, 0);
}

static void do_machine_power_off(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);

	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
			cpcmd(vmpoff_cmd, NULL, 0, NULL);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	do_wait_for_stop();
}

void machine_power_off_smp(void)
{
	on_each_cpu(do_machine_power_off, NULL, 0, 0);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}

/*
 * Send an external call sigp to every other cpu in the system and
 * return without waiting for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/*
		 * Set signaling bit in lowcore of target cpu and kick it
		 */
		set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
		while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
			udelay(10);
	}
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct
{
	__u16 start_ctl;
	__u16 end_ctl;
	unsigned long orvals[16];
	unsigned long andvals[16];
} ec_creg_mask_parms;

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info) {
	ec_creg_mask_parms *pp;
	unsigned long cregs[16];
	int i;

	pp = (ec_creg_mask_parms *) info;
	__ctl_store(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs[pp->start_ctl], pp->start_ctl, pp->end_ctl);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit) {
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 1 << bit;
	parms.andvals[cr] = -1L;
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_set_bit(cr, bit);
	preempt_enable();
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit) {
	ec_creg_mask_parms parms;

	parms.start_ctl = cr;
	parms.end_ctl = cr;
	parms.orvals[cr] = 0;
	parms.andvals[cr] = ~(1L << bit);
	preempt_disable();
	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
	__ctl_clear_bit(cr, bit);
	preempt_enable();
}

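/*
 * Usage sketch (illustrative only, not part of the original file): turn a
 * facility on system-wide by setting the corresponding control register
 * bit on every CPU, then clear it again later. The bit number used here
 * is made up; the bit argument is the shift count for "1 << bit".
 *
 *	smp_ctl_set_bit(0, 17);		// set CR0 bit (1 << 17) on all CPUs
 *	...
 *	smp_ctl_clear_bit(0, 17);	// clear it again everywhere
 */
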
/*
 * Let's check how many CPUs we have.
 */

static unsigned int
__init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */

	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) ==
		    sigp_not_operational)
			continue;
		num_cpus++;
	}

	printk("Detected %d CPU's\n",(int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);

	return num_cpus;
}

/*
 * Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern void pfault_fini(void);

int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* init per CPU timer */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	init_cpu_vtimer();
#endif
#ifdef CONFIG_PFAULT
	/* Enable pfault pseudo page faults on this cpu. */
	if (MACHINE_IS_VM)
		pfault_init();
#endif
	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

/* Reserving and releasing of CPUs */

static DEFINE_SPINLOCK(smp_reserve_lock);
static int smp_cpu_reserved[NR_CPUS];

int
smp_get_cpu(cpumask_t cpu_mask)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	/* Try to find an already reserved cpu. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (smp_cpu_reserved[cpu] != 0) {
			smp_cpu_reserved[cpu]++;
			/* Found one. */
			goto out;
		}
	}
	/* Reserve a new cpu from cpu_mask. */
	for_each_cpu_mask(cpu, cpu_mask) {
		if (cpu_online(cpu)) {
			smp_cpu_reserved[cpu]++;
			goto out;
		}
	}
	cpu = -ENODEV;
out:
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return cpu;
}

void
smp_put_cpu(int cpu)
{
	unsigned long flags;

	spin_lock_irqsave(&smp_reserve_lock, flags);
	smp_cpu_reserved[cpu]--;
	spin_unlock_irqrestore(&smp_reserve_lock, flags);
}

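/*
 * Usage sketch (illustrative only, not part of the original file): reserve
 * any online CPU, run something there, and release it again. While a CPU is
 * reserved, __cpu_disable() below refuses to take it offline (-EBUSY). The
 * helper my_setup_func() is hypothetical.
 *
 *	int cpu;
 *
 *	cpu = smp_get_cpu(CPU_MASK_ALL);
 *	if (cpu >= 0) {
 *		smp_call_function_on(my_setup_func, NULL, 0, 1, cpu);
 *		...
 *		smp_put_cpu(cpu);
 *	}
 */
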
static inline int
cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int
__cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore    *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode          ccode;
	int                 curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode){
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + (THREAD_SIZE);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	__asm__ __volatile__("stam  0,15,0(%0)"
			     : : "a" (&cpu_lowcore->access_regs_save_area)
			     : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();
	signal_processor(cpu,sigp_restart);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int pcpus, cpu;

	pcpus = min(smp_count_cpus() + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pcpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pcpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	cpu_present_map = cpu_possible_map;
}

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

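/*
 * Example (illustrative only, not part of the original file): booting with
 * "additional_cpus=2" reserves room in cpu_possible_map for two CPUs beyond
 * those sensed by smp_count_cpus(), so they can be brought online later via
 * CPU hotplug; "possible_cpus=4" instead fixes the size of cpu_possible_map
 * at four. Both values are capped at NR_CPUS.
 */
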
int
__cpu_disable(void)
{
	unsigned long flags;
	ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[cpu] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}
	cpu_clear(cpu, cpu_online_map);

#ifdef CONFIG_PFAULT
	/* Disable pfault pseudo page faults on this cpu. */
	if (MACHINE_IS_VM)
		pfault_fini();
#endif

	/* disable all external interrupts */

	cr_parms.start_ctl = 0;
	cr_parms.end_ctl = 0;
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
	smp_ctl_bit_callback(&cr_parms);

	/* disable all I/O interrupts */

	cr_parms.start_ctl = 6;
	cr_parms.end_ctl = 6;
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
				1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	/* disable most machine checks */

	cr_parms.start_ctl = 14;
	cr_parms.end_ctl = 14;
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}

void
__cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void
cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for(;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Cycle through the processors and setup structures.
 */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr,0,sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for(i = 0; i < NR_CPUS; i++) {
		if (!cpu_possible(i))
			continue;
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL|GFP_DMA,
					sizeof(void*) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
		stack = __get_free_pages(GFP_KERNEL,0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL,0);
			if (lowcore_ptr[i]->extended_save_area_addr == 0)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static int __init topology_init(void)
{
	int cpu;
	int ret;

	for_each_cpu(cpu) {
		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
		if (ret)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", cpu, ret);
	}
	return 0;
}

subsys_initcall(topology_init);

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);