/*
 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>

#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/vdso_datapage.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif

struct thread_info *secondary_ti;
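/*
 * secondary_ti is written by cpu_idle_thread_init() below and read by the
 * low-level secondary-entry assembly (e.g. head_32.S) to locate the idle
 * thread's stack before it branches to start_secondary().
 */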
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
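/*
 * cpu_sibling_map holds the hardware threads of one core; cpu_core_map is
 * its superset, covering every CPU on the same chip (or sharing an L2,
 * see traverse_core_siblings() below).
 */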
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;

/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];

int smt_enabled_at_boot = 1;

static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
/*
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line.
 */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}
#ifdef CONFIG_PPC64
int smp_generic_kick_cpu(int nr)
{
	BUG_ON(nr < 0 || nr >= NR_CPUS);

	/*
	 * The processor is currently spinning, waiting for the
	 * cpu_start field to become non-zero.  After we set cpu_start,
	 * the processor will continue on to secondary_start.
	 */
	if (!paca[nr].cpu_start) {
		paca[nr].cpu_start = 1;
		smp_mb();
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Ok it's not there, so it might be soft-unplugged, let's
	 * try to bring it back.
	 */
	generic_set_cpu_up(nr);
	smp_wmb();
	smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */

	return 0;
}
#endif /* CONFIG_PPC64 */
static irqreturn_t call_function_action(int irq, void *data)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}
static irqreturn_t reschedule_action(int irq, void *data)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
{
	tick_broadcast_ipi_handler();
	return IRQ_HANDLED;
}
static irqreturn_t debug_ipi_action(int irq, void *data)
{
	if (crash_ipi_function_ptr) {
		crash_ipi_function_ptr(get_irq_regs());
		return IRQ_HANDLED;
	}

#ifdef CONFIG_DEBUGGER
	debugger_ipi(get_irq_regs());
#endif /* CONFIG_DEBUGGER */

	return IRQ_HANDLED;
}
static irq_handler_t smp_ipi_action[] = {
	[PPC_MSG_CALL_FUNCTION] = call_function_action,
	[PPC_MSG_RESCHEDULE] = reschedule_action,
	[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
	[PPC_MSG_DEBUGGER_BREAK] = debug_ipi_action,
};
const char *smp_ipi_name[] = {
	[PPC_MSG_CALL_FUNCTION] = "ipi call function",
	[PPC_MSG_RESCHEDULE] = "ipi reschedule",
	[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
	[PPC_MSG_DEBUGGER_BREAK] = "ipi debugger",
};
/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
{
	int err;

	if (msg < 0 || msg > PPC_MSG_DEBUGGER_BREAK) {
		return -EINVAL;
	}
#if !defined(CONFIG_DEBUGGER) && !defined(CONFIG_KEXEC)
	if (msg == PPC_MSG_DEBUGGER_BREAK) {
		return 1;
	}
#endif
	err = request_irq(virq, smp_ipi_action[msg],
			  IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
			  smp_ipi_name[msg], NULL);
	WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
	     virq, smp_ipi_name[msg], err);

	return err;
}
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
	long messages;		/* current messages */
	unsigned long data;	/* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
void smp_muxed_ipi_set_data(int cpu, unsigned long data)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	info->data = data;
}
void smp_muxed_ipi_set_message(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);
	char *message = (char *)&info->messages;

	/*
	 * Order previous accesses before accesses in the IPI handler.
	 */
	smp_mb();
	message[msg] = 1;
}
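/*
 * Note that each message type gets its own byte of info->messages, so
 * setting one message is a plain byte store: no read-modify-write is
 * needed, and concurrent senders of different message types cannot
 * clobber each other.
 */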
void smp_muxed_ipi_message_pass(int cpu, int msg)
{
	struct cpu_messages *info = &per_cpu(ipi_message, cpu);

	smp_muxed_ipi_set_message(cpu, msg);

	/*
	 * cause_ipi functions are required to include a full barrier
	 * before doing whatever causes the IPI.
	 */
	smp_ops->cause_ipi(cpu, info->data);
}
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
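/*
 * IPI_MESSAGE(A) is the value the 'messages' long takes when byte A is
 * set to 1 by smp_muxed_ipi_set_message(): byte 0 is the most significant
 * byte on big-endian and the least significant on little-endian, so e.g.
 * on a 64-bit big-endian build IPI_MESSAGE(0) == 1uL << 56.
 */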
irqreturn_t smp_ipi_demux(void)
{
	struct cpu_messages *info = this_cpu_ptr(&ipi_message);
	unsigned long all;

	mb();	/* order any irq clear */

	do {
		all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
		/*
		 * Must check for PPC_MSG_RM_HOST_ACTION messages
		 * before PPC_MSG_CALL_FUNCTION messages because when
		 * a VM is destroyed, we call kick_all_cpus_sync()
		 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
		 * messages have completed before we free any VCPUs.
		 */
		if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
			kvmppc_xics_ipi_action();
#endif
		if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
			generic_smp_call_function_interrupt();
		if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
			scheduler_ipi();
		if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
			tick_broadcast_ipi_handler();
		if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
			debug_ipi_action(0, NULL);
	} while (info->messages);

	return IRQ_HANDLED;
}
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
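/*
 * Controllers that provide one hardware IPI per message type install
 * smp_ops->message_pass; everything else falls back to the single muxed
 * IPI and the per-cpu message word demultiplexed above.
 */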
static inline void do_message_pass(int cpu, int msg)
{
	if (smp_ops->message_pass)
		smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
	else
		smp_muxed_ipi_message_pass(cpu, msg);
#endif
}
void smp_send_reschedule(int cpu)
{
	if (likely(smp_ops))
		do_message_pass(cpu, PPC_MSG_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);
void arch_send_call_function_single_ipi(int cpu)
{
	do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
}
#endif
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
void smp_send_debugger_break(void)
{
	int cpu;
	int me = raw_smp_processor_id();

	if (unlikely(!smp_ops))
		return;

	for_each_online_cpu(cpu)
		if (cpu != me)
			do_message_pass(cpu, PPC_MSG_DEBUGGER_BREAK);
}
#endif
#ifdef CONFIG_KEXEC
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
{
	crash_ipi_function_ptr = crash_ipi_callback;
	if (crash_ipi_callback) {
		mb();
		smp_send_debugger_break();
	}
}
#endif
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	while (1)
		;
}
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
struct thread_info *current_set[NR_CPUS];
static void smp_store_cpu_info(int id)
{
	per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
	per_cpu(next_tlbcam_idx, id)
		= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We haven't
	 * spun any cpus up but let's be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	for_each_possible_cpu(cpu) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
					GFP_KERNEL, cpu_to_node(cpu));
		/*
		 * numa_node_id() works after this.
		 */
		if (cpu_present(cpu)) {
			set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
			set_cpu_numa_mem(cpu,
				local_memory_node(numa_cpu_lookup_table[cpu]));
		}
	}

	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (smp_ops && smp_ops->probe)
		smp_ops->probe();
}
void smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != boot_cpuid);
#ifdef CONFIG_PPC64
	paca[boot_cpuid].__current = current;
#endif
	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
	current_set[boot_cpuid] = task_thread_info(current);
}
#ifdef CONFIG_HOTPLUG_CPU

int generic_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == boot_cpuid)
		return -EBUSY;

	set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64
	vdso_data->processorCount--;
#endif
	migrate_irqs();
	return 0;
}
void generic_cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (is_cpu_dead(cpu))
			return;
		msleep(100);
	}
	printk(KERN_ERR "CPU%d didn't die...\n", cpu);
}
void generic_set_cpu_dead(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_DEAD;
}
/*
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
 * which makes the delay in generic_cpu_die() not happen.
 */
void generic_set_cpu_up(unsigned int cpu)
{
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}
int generic_check_cpu_restart(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
int is_cpu_dead(unsigned int cpu)
{
	return per_cpu(cpu_state, cpu) == CPU_DEAD;
}
static bool secondaries_inhibited(void)
{
	return kvm_hv_mode_active();
}

#else /* HOTPLUG_CPU */

#define secondaries_inhibited()		0

#endif
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
	struct thread_info *ti = task_thread_info(idle);

#ifdef CONFIG_PPC64
	paca[cpu].__current = idle;
	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE -
			   STACK_FRAME_OVERHEAD;
#endif
	ti->cpu = cpu;
	secondary_ti = current_set[cpu] = ti;
}
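/*
 * The new CPU's initial kernel stack pointer sits at the top of the idle
 * thread's stack (ti + THREAD_SIZE), minus STACK_FRAME_OVERHEAD to leave
 * room for the first stack frame the secondary entry code builds.
 */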
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int rc, c;

	/*
	 * Don't allow secondary threads to come online if inhibited
	 */
	if (threads_per_core > 1 && secondaries_inhibited() &&
	    cpu_thread_in_subcore(cpu))
		return -EBUSY;

	if (smp_ops == NULL ||
	    (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
		return -EINVAL;

	cpu_idle_thread_init(cpu, tidle);

	/* Make sure callin-map entry is 0 (can be left over from a
	 * previous CPU hotplug).
	 */
	cpu_callin_map[cpu] = 0;

	/* The information for processor bringup must
	 * be written out to main store before we release
	 * the processor.
	 */
	smp_mb();

	/* wake up cpus */
	DBG("smp: kicking cpu %d\n", cpu);
	rc = smp_ops->kick_cpu(cpu);
	if (rc) {
		pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
		return rc;
	}

	/*
	 * wait to see if the cpu made a callin (is actually up).
	 * use this value that I found through experimentation.
	 * -- Cort
	 */
	if (system_state < SYSTEM_RUNNING)
		for (c = 50000; c && !cpu_callin_map[cpu]; c--)
			udelay(100);
#ifdef CONFIG_HOTPLUG_CPU
	else
		/*
		 * CPUs can take much longer to come up in the
		 * hotplug case.  Wait five seconds.
		 */
		for (c = 5000; c && !cpu_callin_map[cpu]; c--)
			msleep(1);
#endif

	if (!cpu_callin_map[cpu]) {
		printk(KERN_ERR "Processor %u is stuck.\n", cpu);
		return -ENOENT;
	}

	DBG("Processor %u found.\n", cpu);

	if (smp_ops->give_timebase)
		smp_ops->give_timebase();

	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();

	return 0;
}
/* Return the value of the reg property corresponding to the given
 * logical cpu.
 */
int cpu_to_core_id(int cpu)
{
	struct device_node *np;
	const __be32 *reg;
	int id = -1;

	np = of_get_cpu_node(cpu, NULL);
	if (!np)
		goto out;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out;

	id = be32_to_cpup(reg);
out:
	of_node_put(np);
	return id;
}
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
{
	return cpu >> threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);

int cpu_first_thread_of_core(int core)
{
	return core << threads_shift;
}
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
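/*
 * Example: with 8 threads per core (threads_shift == 3), logical cpu 19
 * is a thread of core 2 (19 >> 3 == 2), and cpu_first_thread_of_core(2)
 * returns cpu 16.
 */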
static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
{
	const struct cpumask *mask;
	struct device_node *np;
	int i, plen;
	const __be32 *prop;

	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = of_get_cpu_node(i, NULL);
		if (!np)
			continue;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int) &&
		    of_read_number(prop, 1) == chipid) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
}
/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
 */
static struct device_node *cpu_to_l2cache(int cpu)
{
	struct device_node *np;
	struct device_node *cache;

	if (!cpu_present(cpu))
		return NULL;

	np = of_get_cpu_node(cpu, NULL);
	if (np == NULL)
		return NULL;

	cache = of_find_next_cache_node(np);

	of_node_put(np);

	return cache;
}
static void traverse_core_siblings(int cpu, bool add)
{
	struct device_node *l2_cache, *np;
	const struct cpumask *mask;
	int i, chip, plen;
	const __be32 *prop;

	/* First see if we have ibm,chip-id properties in cpu nodes */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		chip = -1;
		prop = of_get_property(np, "ibm,chip-id", &plen);
		if (prop && plen == sizeof(int))
			chip = of_read_number(prop, 1);
		of_node_put(np);
		if (chip >= 0) {
			traverse_siblings_chip_id(cpu, add, chip);
			return;
		}
	}

	/* Otherwise, group CPUs that share an L2 cache */
	l2_cache = cpu_to_l2cache(cpu);
	mask = add ? cpu_online_mask : cpu_present_mask;
	for_each_cpu(i, mask) {
		np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			if (add) {
				cpumask_set_cpu(cpu, cpu_core_mask(i));
				cpumask_set_cpu(i, cpu_core_mask(cpu));
			} else {
				cpumask_clear_cpu(cpu, cpu_core_mask(i));
				cpumask_clear_cpu(i, cpu_core_mask(cpu));
			}
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
}
/* Activate a secondary processor. */
void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;
#endif
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i) && (cpu != base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, true);

	set_numa_node(numa_cpu_lookup_table[cpu]);
	set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));

	smp_wmb();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	BUG();
}
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
{
	int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;

	if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
		printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
		flags |= SD_ASYM_PACKING;
	}

	return flags;
}
#endif
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
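/*
 * Two levels: SMT (hardware threads of a core, with optional asymmetric
 * packing) and DIE (all CPUs in cpu_cpu_mask); the NULL entry terminates
 * the table for set_sched_topology().
 */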
void __init smp_cpus_done(unsigned int max_cpus)
{
	cpumask_var_t old_mask;

	/* We want the setup_cpu() here to be called from CPU 0, but our
	 * init thread may have been "borrowed" by another CPU in the meantime
	 * so we pin us down to CPU 0 for a short while
	 */
	alloc_cpumask_var(&old_mask, GFP_NOWAIT);
	cpumask_copy(old_mask, tsk_cpus_allowed(current));
	set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));

	if (smp_ops && smp_ops->setup_cpu)
		smp_ops->setup_cpu(boot_cpuid);

	set_cpus_allowed_ptr(current, old_mask);

	free_cpumask_var(old_mask);

	if (smp_ops && smp_ops->bringup_done)
		smp_ops->bringup_done();

	dump_numa_cpu_topology();

	set_sched_topology(powerpc_topology);
}
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	int base, i;
	int err;

	if (!smp_ops->cpu_disable)
		return -ENOSYS;

	err = smp_ops->cpu_disable();
	if (err)
		return err;

	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
		cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
		cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
	}
	traverse_core_siblings(cpu, false);

	return 0;
}
void __cpu_die(unsigned int cpu)
{
	if (smp_ops->cpu_die)
		smp_ops->cpu_die(cpu);
}

void cpu_die(void)
{
	if (ppc_md.cpu_die)
		ppc_md.cpu_die();

	/* If we return, we re-enter start_secondary */
	start_secondary_resume();
}
#endif