/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/smpboot.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:      The current cpu state
 * @target:     The target state
 * @thread:     Pointer to the hotplug thread
 * @should_run: Thread should execute
 * @rollback:   Perform a rollback
 * @cb_state:   The state for a single callback (install/uninstall)
 * @cb:         Single callback function (install/uninstall)
 * @result:     Result of the operation
 * @done:       Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
        enum cpuhp_state        state;
        enum cpuhp_state        target;
#ifdef CONFIG_SMP
        struct task_struct      *thread;
        bool                    should_run;
        bool                    rollback;
        enum cpuhp_state        cb_state;
        int                     (*cb)(unsigned int cpu);
        int                     result;
        struct completion       done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

/**
 * cpuhp_step - Hotplug state machine step
 * @name:       Name of the step
 * @startup:    Startup function of the step
 * @teardown:   Teardown function of the step
 * @skip_onerr: Do not invoke the functions on error rollback
 *              Will go away once the notifiers are gone
 * @cant_stop:  Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
        const char      *name;
        int             (*startup)(unsigned int cpu);
        int             (*teardown)(unsigned int cpu);
        bool            skip_onerr;
        bool            cant_stop;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:        The cpu for which the callback should be invoked
 * @step:       The step in the state machine
 * @cb:         The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
                                 int (*cb)(unsigned int))
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int ret = 0;

        if (cb) {
                trace_cpuhp_enter(cpu, st->target, step, cb);
                ret = cb(cpu);
                trace_cpuhp_exit(cpu, st->state, step, ret);
        }
        return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        /* wait queue to wake up the active_writer */
        wait_queue_head_t wq;
        /* verifies that no writer will get active while readers are active */
        struct mutex lock;
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
                                  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        int refcount;

        if (cpu_hotplug.active_writer == current)
                return;

        refcount = atomic_dec_return(&cpu_hotplug.refcount);
        if (WARN_ON(refcount < 0)) /* try to fix things up */
                atomic_inc(&cpu_hotplug.refcount);

        if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
                wake_up(&cpu_hotplug.wq);

        cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
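
/*
 * Illustrative usage sketch (an addition, not part of the original file;
 * do_something() is hypothetical): code that must keep CPUs from coming
 * and going while it walks the online set typically does
 *
 *      get_online_cpus();
 *      for_each_online_cpu(cpu)
 *              do_something(cpu);
 *      put_online_cpus();
 */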

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an api which is called all that often.
 */
void cpu_hotplug_begin(void)
{
        DEFINE_WAIT(wait);

        cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (likely(!atomic_read(&cpu_hotplug.refcount)))
                        break;
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
        finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled++;
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif  /* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}

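/*
 * Illustrative usage sketch (an addition; subsys_cpu_callback and
 * subsys_online are hypothetical): the classic notifier based hotplug
 * hook is registered like
 *
 *      static int subsys_cpu_callback(struct notifier_block *nb,
 *                                     unsigned long action, void *hcpu)
 *      {
 *              unsigned int cpu = (unsigned long)hcpu;
 *
 *              switch (action & ~CPU_TASKS_FROZEN) {
 *              case CPU_ONLINE:
 *                      return notifier_from_errno(subsys_online(cpu));
 *              default:
 *                      return NOTIFY_OK;
 *              }
 *      }
 *
 *      static struct notifier_block subsys_cpu_nb = {
 *              .notifier_call = subsys_cpu_callback,
 *      };
 *
 *      register_cpu_notifier(&subsys_cpu_nb);
 */
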
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
                        int *nr_calls)
{
        unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
        void *hcpu = (void *)(long)cpu;

        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
        return __cpu_notify(val, cpu, -1, NULL);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
        BUG_ON(cpu_notify(val, cpu));
}

/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
        int nr_calls = 0;
        int ret;

        ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
        }
        return ret;
}

static int notify_online(unsigned int cpu)
{
        cpu_notify(CPU_ONLINE, cpu);
        return 0;
}

static int notify_starting(unsigned int cpu)
{
        cpu_notify(CPU_STARTING, cpu);
        return 0;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        wait_for_completion(&st->done);
        return st->result;
}

static int bringup_cpu(unsigned int cpu)
{
        struct task_struct *idle = idle_thread_get(cpu);
        int ret;

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);
        if (ret) {
                cpu_notify(CPU_UP_CANCELED, cpu);
                return ret;
        }
        ret = bringup_wait_for_ap(cpu);
        BUG_ON(!cpu_online(cpu));
        return ret;
}

/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
                          struct cpuhp_step *steps)
{
        for (st->state++; st->state < st->target; st->state++) {
                struct cpuhp_step *step = steps + st->state;

                if (!step->skip_onerr)
                        cpuhp_invoke_callback(cpu, st->state, step->startup);
        }
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                                struct cpuhp_step *steps, enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        for (; st->state > target; st->state--) {
                struct cpuhp_step *step = steps + st->state;

                ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
                if (ret) {
                        st->target = prev_state;
                        undo_cpu_down(cpu, st, steps);
                        break;
                }
        }
        return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
                        struct cpuhp_step *steps)
{
        for (st->state--; st->state > st->target; st->state--) {
                struct cpuhp_step *step = steps + st->state;

                if (!step->skip_onerr)
                        cpuhp_invoke_callback(cpu, st->state, step->teardown);
        }
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                              struct cpuhp_step *steps, enum cpuhp_state target)
{
        enum cpuhp_state prev_state = st->state;
        int ret = 0;

        while (st->state < target) {
                struct cpuhp_step *step;

                st->state++;
                step = steps + st->state;
                ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
                if (ret) {
                        st->target = prev_state;
                        undo_cpu_up(cpu, st, steps);
                        break;
                }
        }
        return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        return st->should_run;
}

/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

        return cpuhp_down_callbacks(cpu, st, cpuhp_ap_states, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
        return cpuhp_up_callbacks(cpu, st, cpuhp_ap_states, st->target);
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        int ret = 0;

        /*
         * Paired with the mb() in cpuhp_kick_ap_work and
         * cpuhp_invoke_ap_callback, so the work set is consistently visible.
         */
        smp_mb();
        if (!st->should_run)
                return;

        st->should_run = false;

        /* Single callback invocation for [un]install ? */
        if (st->cb) {
                if (st->cb_state < CPUHP_AP_ONLINE) {
                        local_irq_disable();
                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
                        local_irq_enable();
                } else {
                        ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
                }
        } else if (st->rollback) {
                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

                undo_cpu_down(cpu, st, cpuhp_ap_states);
                /*
                 * This is a momentary workaround to keep the notifier users
                 * happy. Will go away once we got rid of the notifiers.
                 */
                cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
                st->rollback = false;
        } else {
                /* Cannot happen .... */
                BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

                /* Regular hotplug work */
                if (st->state < st->target)
                        ret = cpuhp_ap_online(cpu, st);
                else if (st->state > st->target)
                        ret = cpuhp_ap_offline(cpu, st);
        }
        st->result = ret;
        complete(&st->done);
}

/* Invoke a single callback on a remote cpu */
static int cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state,
                                    int (*cb)(unsigned int))
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

        if (!cpu_online(cpu))
                return 0;

        st->cb_state = state;
        st->cb = cb;
        /*
         * Make sure the above stores are visible before should_run becomes
         * true. Paired with the mb() above in cpuhp_thread_fun()
         */
        smp_mb();
        st->should_run = true;
        wake_up_process(st->thread);
        wait_for_completion(&st->done);
        return st->result;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
        st->result = 0;
        st->cb = NULL;
        /*
         * Make sure the above stores are visible before should_run becomes
         * true. Paired with the mb() above in cpuhp_thread_fun()
         */
        smp_mb();
        st->should_run = true;
        wake_up_process(st->thread);
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state state = st->state;

        trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
        __cpuhp_kick_ap_work(st);
        wait_for_completion(&st->done);
        trace_cpuhp_exit(cpu, st->state, state, st->result);
        return st->result;
}

static struct smp_hotplug_thread cpuhp_threads = {
        .store                  = &cpuhp_state.thread,
        .create                 = &cpuhp_create,
        .thread_should_run      = cpuhp_should_run,
        .thread_fn              = cpuhp_thread_fun,
        .thread_comm            = "cpuhp/%u",
        .selfparking            = true,
};

void __init cpuhp_threads_init(void)
{
        BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
        kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock(&tasklist_lock);
        for_each_process_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with unlocked task_rq(p)->lock.
                 * Order the reading so that we do not warn about a task
                 * which was running on this cpu in the past and has just
                 * been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        }
        read_unlock(&tasklist_lock);
}

static int notify_down_prepare(unsigned int cpu)
{
        int err, nr_calls = 0;

        err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
        }
        return err;
}

static int notify_dying(unsigned int cpu)
{
        cpu_notify(CPU_DYING, cpu);
        return 0;
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
        int err, cpu = smp_processor_id();

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /* Invoke the former CPU_DYING callbacks */
        for (; st->state > target; st->state--) {
                struct cpuhp_step *step = cpuhp_ap_states + st->state;

                cpuhp_invoke_callback(cpu, st->state, step->teardown);
        }
        /* Give up timekeeping duties */
        tick_handover_do_timer();
        /* Park the stopper thread */
        stop_machine_park(cpu);
        return 0;
}

static int takedown_cpu(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int err;

        /*
         * By now we've cleared cpu_active_mask, wait for all preempt-disabled
         * and RCU users of this state to go away such that all new such users
         * will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
         * not imply sync_sched(), so wait for both.
         *
         * Do the sync before parking the smpboot threads to take care of the
         * RCU boost case.
         */
        if (IS_ENABLED(CONFIG_PREEMPT))
                synchronize_rcu_mult(call_rcu, call_rcu_sched);
        else
                synchronize_rcu();

        /* Park the smpboot threads */
        kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
        smpboot_park_threads(cpu);

        /*
         * Prevent irq alloc/free while the dying cpu reorganizes the
         * interrupt affinities.
         */
        irq_lock_sparse();

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */
        err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
        if (err) {
                /* CPU refused to die */
                irq_unlock_sparse();
                /* Unpark the hotplug thread so we can rollback there */
                kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
                return err;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        wait_for_completion(&st->done);
        BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

        /* Interrupts are moved away from the dying cpu, reenable alloc/free */
        irq_unlock_sparse();

        hotplug_cpu__broadcast_tick_pull(cpu);
        /* This actually kills the CPU. */
        __cpu_die(cpu);

        tick_cleanup_dead_cpu(cpu);
        return 0;
}

static int notify_dead(unsigned int cpu)
{
        cpu_notify_nofail(CPU_DEAD, cpu);
        check_for_tasks(cpu);
        return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
        struct cpuhp_cpu_state *st = arg;

        complete(&st->done);
}

void cpuhp_report_idle_dead(void)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

        BUG_ON(st->state != CPUHP_AP_OFFLINE);
        rcu_report_dead(smp_processor_id());
        st->state = CPUHP_AP_IDLE_DEAD;
        /*
         * We cannot call complete after rcu_report_dead() so we delegate it
         * to an online cpu.
         */
        smp_call_function_single(cpumask_first(cpu_online_mask),
                                 cpuhp_complete_idle_dead, st, 0);
}

#else
#define notify_down_prepare     NULL
#define takedown_cpu            NULL
#define notify_dead             NULL
#define notify_dying            NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
                           enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int prev_state, ret = 0;
        bool hasdied = false;

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        cpuhp_tasks_frozen = tasks_frozen;

        prev_state = st->state;
        st->target = target;
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread.
         */
        if (st->state > CPUHP_TEARDOWN_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code..
                 */
                if (ret)
                        goto out;

                /*
                 * We might have stopped still in the range of the AP hotplug
                 * thread. Nothing to do anymore.
                 */
                if (st->state > CPUHP_TEARDOWN_CPU)
                        goto out;
        }
        /*
         * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
        if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
                st->target = prev_state;
                st->rollback = true;
                cpuhp_kick_ap_work(cpu);
        }

        hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
        cpu_hotplug_done();
        /* This post dead nonsense must die */
        if (!ret && hasdied)
                cpu_notify_nofail(CPU_POST_DEAD, cpu);
        return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0, target);

out:
        cpu_maps_update_done();
        return err;
}

int cpu_down(unsigned int cpu)
{
        return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

        while (st->state < target) {
                struct cpuhp_step *step;

                st->state++;
                step = cpuhp_ap_states + st->state;
                cpuhp_invoke_callback(cpu, st->state, step->startup);
        }
}

/*
 * Called from the idle task. We need to set active here, so we can kick off
 * the stopper thread and unpark the smpboot threads. If the target state is
 * beyond CPUHP_AP_ONLINE_IDLE we kick the cpuhp thread and let it bring up
 * the cpu further.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
        unsigned int cpu = smp_processor_id();

        /* Happens for the boot cpu */
        if (state != CPUHP_AP_ONLINE_IDLE)
                return;

        st->state = CPUHP_AP_ONLINE_IDLE;

        /* The cpu is marked online, set it active now */
        set_cpu_active(cpu, true);
        /* Unpark the stopper thread and the hotplug thread of this cpu */
        stop_machine_unpark(cpu);
        kthread_unpark(st->thread);

        /* Should we go further up ? */
        if (st->target > CPUHP_AP_ONLINE_IDLE)
                __cpuhp_kick_ap_work(st);
        else
                complete(&st->done);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        struct task_struct *idle;
        int ret = 0;

        cpu_hotplug_begin();

        if (!cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * The caller of do_cpu_up might have raced with another
         * caller. Ignore it for now.
         */
        if (st->state >= target)
                goto out;

        if (st->state == CPUHP_OFFLINE) {
                /* Let it fail before we try to bring the cpu up */
                idle = idle_thread_get(cpu);
                if (IS_ERR(idle)) {
                        ret = PTR_ERR(idle);
                        goto out;
                }
        }

        cpuhp_tasks_frozen = tasks_frozen;

        st->target = target;
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread once more.
         */
        if (st->state > CPUHP_BRINGUP_CPU) {
                ret = cpuhp_kick_ap_work(cpu);
                /*
                 * The AP side has done the error rollback already. Just
                 * return the error code..
                 */
                if (ret)
                        goto out;
        }

        /*
         * Try to reach the target state. We max out on the BP at
         * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
         * responsible for bringing it up to the target state.
         */
        target = min((int)target, CPUHP_BRINGUP_CPU);
        ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
out:
        cpu_hotplug_done();
        return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0, target);
out:
        cpu_maps_update_done();
        return err;
}

int cpu_up(unsigned int cpu)
{
        return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);

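/*
 * Note (added): cpu_up()/cpu_down() are also the backends reached from
 * userspace via the sysfs 'online' attribute, e.g.
 *
 *      echo 0 > /sys/devices/system/cpu/cpu1/online
 *
 * ends up in cpu_down(1) via cpu_subsys_offline() in drivers/base/cpu.c.
 */
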
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error)
                BUG_ON(num_online_cpus() > 1);
        else
                pr_err("Non-boot CPUs are not disabled\n");

        /*
         * Make sure the CPUs won't be enabled by someone else. We need to do
         * this even in case of failure as all disable_nonboot_cpus() users are
         * supposed to do enable_nonboot_cpus() on the failure path.
         */
        cpu_hotplug_disabled++;

        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1, CPUHP_ONLINE);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has higher priority than x86
         * bsp_pm_callback which depends on cpu_hotplug_pm_callback
         * to disable cpu hotplug to avoid cpu hotplug race.
         */
        pm_notifier(cpu_hotplug_pm_callback, 0);
        return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
        [CPUHP_OFFLINE] = {
                .name                   = "offline",
                .startup                = NULL,
                .teardown               = NULL,
        },
#ifdef CONFIG_SMP
        [CPUHP_CREATE_THREADS] = {
                .name                   = "threads:create",
                .startup                = smpboot_create_threads,
                .teardown               = NULL,
                .cant_stop              = true,
        },
        /*
         * Preparatory and dead notifiers. Will be replaced once the notifiers
         * are converted to states.
         */
        [CPUHP_NOTIFY_PREPARE] = {
                .name                   = "notify:prepare",
                .startup                = notify_prepare,
                .teardown               = notify_dead,
                .skip_onerr             = true,
                .cant_stop              = true,
        },
        /* Kicks the plugged cpu into life */
        [CPUHP_BRINGUP_CPU] = {
                .name                   = "cpu:bringup",
                .startup                = bringup_cpu,
                .teardown               = NULL,
                .cant_stop              = true,
        },
        /*
         * Handled on the control processor until the plugged processor
         * manages this itself.
         */
        [CPUHP_TEARDOWN_CPU] = {
                .name                   = "cpu:teardown",
                .startup                = NULL,
                .teardown               = takedown_cpu,
                .cant_stop              = true,
        },
#endif
};

/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
        /* Final state before CPU kills itself */
        [CPUHP_AP_IDLE_DEAD] = {
                .name                   = "idle:dead",
        },
        /*
         * Last state before CPU enters the idle loop to die. Transient state
         * for synchronization.
         */
        [CPUHP_AP_OFFLINE] = {
                .name                   = "ap:offline",
                .cant_stop              = true,
        },
        /*
         * Low level startup/teardown notifiers. Run with interrupts
         * disabled. Will be removed once the notifiers are converted to
         * states.
         */
        [CPUHP_AP_NOTIFY_STARTING] = {
                .name                   = "notify:starting",
                .startup                = notify_starting,
                .teardown               = notify_dying,
                .skip_onerr             = true,
                .cant_stop              = true,
        },
        /*
         * Entry state on starting. Interrupts enabled from here on. Transient
         * state for synchronization.
         */
        [CPUHP_AP_ONLINE] = {
                .name                   = "ap:online",
        },
        /* Handle smpboot threads park/unpark */
        [CPUHP_AP_SMPBOOT_THREADS] = {
                .name                   = "smpboot:threads",
                .startup                = smpboot_unpark_threads,
                .teardown               = NULL,
        },
        /*
         * Online/down_prepare notifiers. Will be removed once the notifiers
         * are converted to states.
         */
        [CPUHP_AP_NOTIFY_ONLINE] = {
                .name                   = "notify:online",
                .startup                = notify_online,
                .teardown               = notify_down_prepare,
                .skip_onerr             = true,
        },
#endif
        /*
         * The dynamically registered state space is here
         */

        /* CPU is fully up and running. */
        [CPUHP_ONLINE] = {
                .name                   = "online",
                .startup                = NULL,
                .teardown               = NULL,
        },
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
        if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
                return -EINVAL;
        return 0;
}

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
        /*
         * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
         * purposes as that state is handled explicitly in cpu_down.
         */
        return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
        struct cpuhp_step *sp;

        sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
        return sp + state;
}

static void cpuhp_store_callbacks(enum cpuhp_state state,
                                  const char *name,
                                  int (*startup)(unsigned int cpu),
                                  int (*teardown)(unsigned int cpu))
{
        /* (Un)Install the callbacks for further cpu hotplug operations */
        struct cpuhp_step *sp;

        mutex_lock(&cpuhp_state_mutex);
        sp = cpuhp_get_step(state);
        sp->startup = startup;
        sp->teardown = teardown;
        sp->name = name;
        mutex_unlock(&cpuhp_state_mutex);
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
        return cpuhp_get_step(state)->teardown;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
                            int (*cb)(unsigned int), bool bringup)
{
        int ret;

        if (!cb)
                return 0;
        /*
         * The non AP bound callbacks can fail on bringup. On teardown
         * e.g. module removal we crash for now.
         */
#ifdef CONFIG_SMP
        if (cpuhp_is_ap_state(state))
                ret = cpuhp_invoke_ap_callback(cpu, state, cb);
        else
                ret = cpuhp_invoke_callback(cpu, state, cb);
#else
        ret = cpuhp_invoke_callback(cpu, state, cb);
#endif
        BUG_ON(ret && !bringup);
        return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
                                   int (*teardown)(unsigned int cpu))
{
        int cpu;

        if (!teardown)
                return;

        /* Roll back the already executed steps on the other cpus */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpu >= failedcpu)
                        break;

                /* Did we invoke the startup call on that cpu ? */
                if (cpustate >= state)
                        cpuhp_issue_call(cpu, state, teardown, false);
        }
}

/*
 * Returns a free slot for dynamic assignment in the ONLINE state space. The
 * states are protected by the cpuhp_state_mutex and an empty slot is
 * identified by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
        enum cpuhp_state i;

        mutex_lock(&cpuhp_state_mutex);
        for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
                if (cpuhp_ap_states[i].name)
                        continue;

                cpuhp_ap_states[i].name = "Reserved";
                mutex_unlock(&cpuhp_state_mutex);
                return i;
        }
        mutex_unlock(&cpuhp_state_mutex);
        WARN(1, "No more dynamic states available for CPU hotplug\n");
        return -ENOSPC;
}

/**
 * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
 * @state:      The state to setup
 * @name:       Name of the state
 * @invoke:     If true, the startup function is invoked for cpus where
 *              cpu state >= @state
 * @startup:    startup callback function
 * @teardown:   teardown callback function
 *
 * Returns 0 if successful, otherwise a proper error code
 */
int __cpuhp_setup_state(enum cpuhp_state state,
                        const char *name, bool invoke,
                        int (*startup)(unsigned int cpu),
                        int (*teardown)(unsigned int cpu))
{
        int cpu, ret = 0;
        int dyn_state = 0;

        if (cpuhp_cb_check(state) || !name)
                return -EINVAL;

        get_online_cpus();

        /* currently assignments for the ONLINE state are possible */
        if (state == CPUHP_AP_ONLINE_DYN) {
                dyn_state = 1;
                ret = cpuhp_reserve_state(state);
                if (ret < 0)
                        goto out;
                state = ret;
        }

        cpuhp_store_callbacks(state, name, startup, teardown);

        if (!invoke || !startup)
                goto out;

        /*
         * Try to call the startup callback for each present cpu
         * depending on the hotplug state of the cpu.
         */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpustate < state)
                        continue;

                ret = cpuhp_issue_call(cpu, state, startup, true);
                if (ret) {
                        cpuhp_rollback_install(cpu, state, teardown);
                        cpuhp_store_callbacks(state, NULL, NULL, NULL);
                        goto out;
                }
        }
out:
        put_online_cpus();
        if (!ret && dyn_state)
                return state;
        return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);

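/*
 * Illustrative usage sketch (an addition; the subsys_* callbacks are
 * hypothetical): a subsystem installs a dynamic online state with
 *
 *      ret = __cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys:online", true,
 *                                subsys_cpu_online, subsys_cpu_offline);
 *
 * For CPUHP_AP_ONLINE_DYN a positive return value is the actually allocated
 * state number, which must be passed to __cpuhp_remove_state() on cleanup.
 */
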
/**
 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
 * @state:      The state to remove
 * @invoke:     If true, the teardown function is invoked for cpus where
 *              cpu state >= @state
 *
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
        int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
        int cpu;

        BUG_ON(cpuhp_cb_check(state));

        get_online_cpus();

        if (!invoke || !teardown)
                goto remove;

        /*
         * Call the teardown callback for each present cpu depending
         * on the hotplug state of the cpu. This function is not
         * allowed to fail currently!
         */
        for_each_present_cpu(cpu) {
                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
                int cpustate = st->state;

                if (cpustate >= state)
                        cpuhp_issue_call(cpu, state, teardown, false);
        }
remove:
        cpuhp_store_callbacks(state, NULL, NULL, NULL);
        put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

        return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
        struct cpuhp_step *sp;
        int target, ret;

        ret = kstrtoint(buf, 10, &target);
        if (ret)
                return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
        if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
                return -EINVAL;
#else
        if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
                return -EINVAL;
#endif

        ret = lock_device_hotplug_sysfs();
        if (ret)
                return ret;

        mutex_lock(&cpuhp_state_mutex);
        sp = cpuhp_get_step(target);
        ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
        mutex_unlock(&cpuhp_state_mutex);
        if (ret)
                return ret;

        if (st->state < target)
                ret = do_cpu_up(dev->id, target);
        else
                ret = do_cpu_down(dev->id, target);

        unlock_device_hotplug();
        return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

        return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static struct attribute *cpuhp_cpu_attrs[] = {
        &dev_attr_state.attr,
        &dev_attr_target.attr,
        NULL
};

static struct attribute_group cpuhp_cpu_attr_group = {
        .attrs = cpuhp_cpu_attrs,
        .name = "hotplug",
        NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        ssize_t cur, res = 0;
        int i;

        mutex_lock(&cpuhp_state_mutex);
        for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
                struct cpuhp_step *sp = cpuhp_get_step(i);

                if (sp->name) {
                        cur = sprintf(buf, "%3d: %s\n", i, sp->name);
                        buf += cur;
                        res += cur;
                }
        }
        mutex_unlock(&cpuhp_state_mutex);
        return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
        &dev_attr_states.attr,
        NULL
};

static struct attribute_group cpuhp_cpu_root_attr_group = {
        .attrs = cpuhp_cpu_root_attrs,
        .name = "hotplug",
        NULL
};

static int __init cpuhp_sysfs_init(void)
{
        int cpu, ret;

        ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                 &cpuhp_cpu_root_attr_group);
        if (ret)
                return ret;

        for_each_possible_cpu(cpu) {
                struct device *dev = get_cpu_device(cpu);

                if (!dev)
                        continue;
                ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
                if (ret)
                        return ret;
        }
        return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
        = {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
        int cpu = smp_processor_id();

        /* Mark the boot cpu "present", "online" etc for SMP and UP case */
        set_cpu_online(cpu, true);
        set_cpu_active(cpu, true);
        set_cpu_present(cpu, true);
        set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
        per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}