/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char	*name;
	int		(*startup)(unsigned int cpu);
	int		(*teardown)(unsigned int cpu);
	bool		skip_onerr;
	bool		cant_stop;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @step:	The step in the state machine
 * @cb:		The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
				 int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret = 0;

	if (cb) {
		trace_cpuhp_enter(cpu, st->target, step, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, step, ret);
	}
	return ret;
}
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);
void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}
void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}
/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}
static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

static int notify_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
	return 0;
}
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	BUG_ON(!cpu_online(cpu));
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has just
		 * been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}
static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}
static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
	}
	return err;
}
static int notify_dying(unsigned int cpu)
{
	cpu_notify(CPU_DYING, cpu);
	return 0;
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = cpuhp_ap_states + st->state;

		cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
static int takedown_cpu(unsigned int cpu)
{
	int err;

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		irq_unlock_sparse();
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}
static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}
#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#define notify_dying		NULL
#endif
#ifdef CONFIG_HOTPLUG_CPU
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	for (; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;

	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	while (st->state < target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_ap_states + st->state;
		cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}
/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread.
 */
static int cpuhp_set_cpu_active(unsigned int cpu)
{
	/* The cpu is marked online, set it active now */
	set_cpu_active(cpu, true);
	/* Unpark the stopper thread */
	stop_machine_unpark(cpu);
	return 0;
}
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int prev_state, ret = 0;

	cpu_hotplug_begin();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	while (st->state < st->target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_bp_states + st->state;
		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
out:
	cpu_hotplug_done();
	return ret;
}
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}
void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_CREATE_THREADS] = {
		.name		= "threads:create",
		.startup	= smpboot_create_threads,
	},
	[CPUHP_NOTIFY_PREPARE] = {
		.name		= "notify:prepare",
		.startup	= notify_prepare,
		.teardown	= notify_dead,
	},
	[CPUHP_BRINGUP_CPU] = {
		.name		= "cpu:bringup",
		.startup	= bringup_cpu,
	},
	[CPUHP_TEARDOWN_CPU] = {
		.name		= "cpu:teardown",
		.teardown	= takedown_cpu,
	},
	[CPUHP_CPU_SET_ACTIVE] = {
		.name		= "cpu:active",
		.startup	= cpuhp_set_cpu_active,
	},
	[CPUHP_NOTIFY_ONLINE] = {
		.name		= "notify:online",
		.startup	= notify_online,
		.teardown	= notify_down_prepare,
	},
};
/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
	[CPUHP_AP_NOTIFY_STARTING] = {
		.name		= "notify:starting",
		.startup	= notify_starting,
		.teardown	= notify_dying,
	},
};
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	return (state > CPUHP_AP_OFFLINE && state < CPUHP_AP_ONLINE);
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}
static void cpuhp_store_callbacks(enum cpuhp_state state,
				  const char *name,
				  int (*startup)(unsigned int cpu),
				  int (*teardown)(unsigned int cpu))
{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(state);
	sp->startup = startup;
	sp->teardown = teardown;
	sp->name = name;
	mutex_unlock(&cpuhp_state_mutex);
}
static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown;
}
/* Helper function to run callback on the target cpu */
static void cpuhp_on_cpu_cb(void *__cb)
{
	int (*cb)(unsigned int cpu) = __cb;

	BUG_ON(cb(smp_processor_id()));
}
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
			    int (*cb)(unsigned int), bool bringup)
{
	int ret;

	if (!cb)
		return 0;

	/*
	 * This invokes the callback directly for now. In a later step we
	 * convert that to use cpuhp_invoke_callback().
	 */
	if (cpuhp_is_ap_state(state)) {
		/*
		 * Note that a function called on the AP is not
		 * allowed to fail.
		 */
		if (cpu_online(cpu))
			smp_call_function_single(cpu, cpuhp_on_cpu_cb, cb, 1);
		return 0;
	}

	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
	ret = cb(cpu);
	BUG_ON(ret && !bringup);
	return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   int (*teardown)(unsigned int cpu))
{
	int cpu;

	if (!teardown)
		return;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, teardown, false);
	}
}
/*
 * Returns a free slot for dynamic assignment in the Online state space. The
 * states are protected by the cpuhp_state_mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_ONLINE_DYN; i <= CPUHP_ONLINE_DYN_END; i++) {
		if (cpuhp_bp_states[i].name)
			continue;

		cpuhp_bp_states[i].name = "Reserved";
		mutex_unlock(&cpuhp_state_mutex);
		return i;
	}
	mutex_unlock(&cpuhp_state_mutex);
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
/**
 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
 * @state:	The state to setup
 * @name:	Name of the state
 * @invoke:	If true, the startup function is invoked for cpus where
 *		cpu state >= @state
 * @startup:	startup callback function
 * @teardown:	teardown callback function
 *
 * Returns 0 if successful, otherwise a proper error code
 */
int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu))
{
	int cpu, ret = 0;
	int dyn_state = 0;

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	get_online_cpus();

	/* currently assignments for the ONLINE state are possible */
	if (state == CPUHP_ONLINE_DYN) {
		dyn_state = 1;
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			goto out;
		state = ret;
	}

	cpuhp_store_callbacks(state, name, startup, teardown);

	if (!invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, startup, true);
		if (ret) {
			cpuhp_rollback_install(cpu, state, teardown);
			cpuhp_store_callbacks(state, NULL, NULL, NULL);
			goto out;
		}
	}
out:
	put_online_cpus();
	if (!ret && dyn_state)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
/**
 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	get_online_cpus();

	if (!invoke || !teardown)
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, teardown, false);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL);
	put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = do_cpu_up(dev->id, target);
	else
		ret = do_cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}
static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};
static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};
static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}
/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}