/*
 * SMP support for ppc.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
#include <asm/vdso_datapage.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif

int smp_hw_index[NR_CPUS];
struct thread_info *secondary_ti;

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

cpumask_t cpu_possible_map = CPU_MASK_NONE;
cpumask_t cpu_online_map = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_possible_map);

/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

static volatile unsigned int cpu_callin_map[NR_CPUS];

void smp_call_function_interrupt(void);

int smt_enabled_at_boot = 1;

#ifdef CONFIG_MPIC
int __init smp_mpic_probe(void)
{
	int nr_cpus;

	DBG("smp_mpic_probe()...\n");

	nr_cpus = cpus_weight(cpu_possible_map);

	DBG("nr_cpus: %d\n", nr_cpus);

	if (nr_cpus > 1)
		mpic_request_ipis();

	return nr_cpus;
}

void __devinit smp_mpic_setup_cpu(int cpu)
{
	mpic_setup_this_cpu();
}
#endif /* CONFIG_MPIC */

#ifdef CONFIG_PPC64
void __devinit smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	paca[nr].cpu_start = 1;
	smp_mb();
}
#endif
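
/*
 * A sketch of the matching wait on the secondary side (illustrative
 * only; the real spin loop lives in the platform's early entry code):
 *
 *	while (paca[cpu].cpu_start == 0)
 *		;	<- spin until kicked, then fall through
 *		  	   into secondary_start
 *
 * The smp_mb() above makes the store to cpu_start visible to the
 * spinning processor before the kicking CPU proceeds.
 */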

void smp_message_recv(int msg, struct pt_regs *regs)
{
	switch(msg) {
	case PPC_MSG_CALL_FUNCTION:
		smp_call_function_interrupt();
		break;
	case PPC_MSG_RESCHEDULE:
		/* XXX Do we have to do this? */
		set_need_resched();
		break;
#ifdef CONFIG_DEBUGGER
	case PPC_MSG_DEBUGGER_BREAK:
		debugger_ipi(regs);
		break;
#endif
	default:
		printk("SMP %d: smp_message_recv(): unknown msg %d\n",
		       smp_processor_id(), msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
}

#ifdef CONFIG_DEBUGGER
void smp_send_debugger_break(int cpu)
{
	smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif

static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 * Stolen from the i386 version.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

static struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
} *call_data;
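
/*
 * Rendezvous protocol (a summary of the code below): the initiator
 * publishes call_data, sends an IPI to all other CPUs, then spins
 * until 'started' (and, if wait != 0, 'finished') reaches the number
 * of other online CPUs.  Each receiver bumps 'started' before running
 * func and 'finished' after it returns.
 */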

/* delay of at least 8 seconds */
#define SMP_CALL_TIMEOUT	8

/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 *
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
		       int wait)
{
	struct call_data_struct data;
	int ret = -1, cpus;
	u64 timeout;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);
	/* Must grab online cpu count with preempt disabled, otherwise
	 * it can change. */
	cpus = num_online_cpus() - 1;
	if (!cpus) {
		ret = 0;
		goto out;
	}

	call_data = &data;
	smp_wmb();
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_CALL_FUNCTION);

	timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;

	/* Wait for response */
	while (atomic_read(&data.started) != cpus) {
		HMT_low();
		if (get_tb() >= timeout) {
			printk("smp_call_function on cpu %d: other cpus not "
			       "responding (%d)\n", smp_processor_id(),
			       atomic_read(&data.started));
			debugger(NULL);
			goto out;
		}
	}

	if (wait) {
		while (atomic_read(&data.finished) != cpus) {
			HMT_low();
			if (get_tb() >= timeout) {
				printk("smp_call_function on cpu %d: other "
				       "cpus not finishing (%d/%d)\n",
				       smp_processor_id(),
				       atomic_read(&data.finished),
				       atomic_read(&data.started));
				debugger(NULL);
				goto out;
			}
		}
	}

	ret = 0;

 out:
	call_data = NULL;
	HMT_medium();
	spin_unlock(&call_lock);
	return ret;
}

EXPORT_SYMBOL(smp_call_function);
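
/*
 * Example (hypothetical caller, for illustration only): run a fast,
 * non-blocking handler on every other CPU and wait for it to finish.
 *
 *	static void drain_local_state(void *unused)
 *	{
 *		... must not block; runs in interrupt context ...
 *	}
 *
 *	smp_call_function(drain_local_state, NULL, 0, 1);
 *
 * drain_local_state is a made-up name; any handler must follow the
 * rules in the comment above smp_call_function().
 */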

void smp_call_function_interrupt(void)
{
	void (*func) (void *info);
	void *info;
	int wait;

	/* call_data will be NULL if the sender timed out while
	 * waiting on us to receive the call.
	 */
	if (!call_data)
		return;

	func = call_data->func;
	info = call_data->info;
	wait = call_data->wait;

	if (!wait)
		smp_mb__before_atomic_inc();

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	(*func)(info);
	if (wait) {
		smp_mb__before_atomic_inc();
		atomic_inc(&call_data->finished);
	}
}
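
/*
 * Note on the barriers above: in the !wait case, once 'started' equals
 * the CPU count the initiator may return and its on-stack call data
 * goes out of scope, so smp_mb__before_atomic_inc() keeps the reads of
 * func/info/wait ahead of the increment.  In the wait case the barrier
 * sits before the 'finished' increment instead, so the effects of func
 * are visible to the initiator before it proceeds.
 */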

extern struct gettimeofday_struct do_gtod;

struct thread_info *current_set[NR_CPUS];

DECLARE_PER_CPU(unsigned int, pvr);

static void __devinit smp_store_cpu_info(int id)
{
	per_cpu(pvr, id) = mfspr(SPRN_PVR);
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/* create a process for the processor */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
	paca[cpu].__current = p;
#endif
	current_set[cpu] = p->thread_info;
	p->thread_info->cpu = cpu;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	max_cpus = smp_ops->probe();

	smp_space_timers(max_cpus);

	for_each_cpu(cpu)
		if (cpu != boot_cpuid)
			smp_create_idle(cpu);
}

void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);

	cpu_set(boot_cpuid, cpu_online_map);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	current_set[boot_cpuid] = current->thread_info;
}

#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
	fixup_irqs(cpu_online_map);
#endif
	return 0;
}

int generic_cpu_enable(unsigned int cpu)
{
	/* Do the normal bootup if we haven't
	 * already bootstrapped. */
	if (system_state != SYSTEM_RUNNING)
		return -ENOSYS;

	/* get the target out of its holding state */
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	smp_wmb();

	while (!cpu_online(cpu))
		cpu_relax();

#ifdef CONFIG_PPC64
	fixup_irqs(cpu_online_map);
	/* counter the irq disable in fixup_irqs */
	local_irq_enable();
#endif
	return 0;
}

void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD)
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}

void generic_mach_cpu_die(void)
{
	unsigned int cpu;

	local_irq_disable();
	cpu = smp_processor_id();
	printk(KERN_DEBUG "CPU%d offline\n", cpu);
	__get_cpu_var(cpu_state) = CPU_DEAD;
	smp_wmb();
	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
		cpu_relax();

#ifdef CONFIG_PPC64
	flush_tlb_pending();
#endif
	cpu_set(cpu, cpu_online_map);
	local_irq_enable();
}
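
/*
 * Hotplug handshake (a summary of the three functions above): the
 * dying CPU marks itself CPU_DEAD and spins in generic_mach_cpu_die();
 * generic_cpu_die() polls that flag for up to 100 * 100ms = 10s; a
 * later generic_cpu_enable() flips the state back to CPU_UP_PREPARE,
 * which releases the spinning CPU to come back online.
 */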
#endif

static int __devinit cpu_enable(unsigned int cpu)
{
	if (smp_ops->cpu_enable)
		return smp_ops->cpu_enable(cpu);

	return -ENOSYS;
}

int __devinit __cpu_up(unsigned int cpu)
{
	int c;

	secondary_ti = current_set[cpu];
	if (!cpu_enable(cpu))
		return 0;

	if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))
		return -EINVAL;

#ifdef CONFIG_PPC64
	paca[cpu].default_decr = tb_ticks_per_jiffy;
#endif

	/* Make sure callin-map entry is 0 (can be left over
	 * from a previous CPU hotplug)
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	smp_ops->kick_cpu(cpu);

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 25; c && !cpu_callin_map[cpu]; c--) {
			msleep(200);
		}
#endif

	if (!cpu_callin_map[cpu]) {
		printk("Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	printk("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online map */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
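
/*
 * Timing note (derived from the loops in __cpu_up above): the callin
 * wait is at most 5000 * 100us = 0.5s at boot, and at most
 * 25 * 200ms = 5s in the hotplug case.
 */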


/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	spin_lock(&call_lock);
	cpu_set(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	local_irq_enable();

	cpu_idle();
	return 0;
}
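
/*
 * Note: cpu_online_map is updated under call_lock so that a concurrent
 * smp_call_function() sees a stable count of online CPUs while it
 * waits for responses.
 */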

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the
	 * meantime, so we pin ourselves to CPU 0 for a short while.
	 */
	old_mask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid));

	smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed(current, old_mask);
}

#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	if (smp_ops->cpu_disable)
		return smp_ops->cpu_disable();

	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}
#endif