#ifndef _LINUX_STOP_MACHINE
#define _LINUX_STOP_MACHINE

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/list.h>

/*
 * stop_cpu[s]() is a simplistic per-cpu maximum priority cpu
 * monopolization mechanism.  The caller can specify a non-sleeping
 * function to be executed on a single or multiple cpus, preempting all
 * other processes and monopolizing those cpus until it finishes.
 *
 * Resources for this mechanism are preallocated when a cpu is brought
 * up and requests are guaranteed to be served as long as the target
 * cpus are online.
 */
typedef int (*cpu_stop_fn_t)(void *arg);
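
/*
 * A minimal illustrative callback (hypothetical, not declared anywhere in
 * this header).  It must not sleep: it runs with the target cpu fully
 * monopolized, and its return value is handed back to the stop_*() caller.
 *
 *	static int my_stop_fn(void *arg)
 *	{
 *		int *seen_cpu = arg;
 *
 *		*seen_cpu = raw_smp_processor_id();
 *		return 0;
 *	}
 */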

#ifdef CONFIG_SMP

struct cpu_stop_work {
	struct list_head	list;		/* cpu_stopper->works */
	cpu_stop_fn_t		fn;
	void			*arg;
	struct cpu_stop_done	*done;
};

int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf);
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
void stop_machine_park(int cpu);
void stop_machine_unpark(int cpu);
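
/*
 * Usage sketch (illustrative only; "my_stop_fn", "who" and "nudge_work"
 * are hypothetical names):
 *
 *	static int who;
 *	static struct cpu_stop_work nudge_work;
 *
 *	ret = stop_one_cpu(1, my_stop_fn, &who);
 *	queued = stop_one_cpu_nowait(1, my_stop_fn, &who, &nudge_work);
 *
 * stop_one_cpu() sleeps until the callback has finished and returns its
 * return value, or -ENOENT if cpu 1 is not online.  The nowait variant
 * only queues the work and returns immediately; @work_buf must stay valid
 * until the callback has run, so a static or per-cpu cpu_stop_work is the
 * usual choice.
 */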

#else	/* CONFIG_SMP */

#include <linux/workqueue.h>

/*
 * UP fallback: the only possible target is the local cpu, so the callback
 * is run directly (or via the system workqueue for the nowait variant)
 * with preemption disabled.
 */
struct cpu_stop_work {
	struct work_struct	work;
	cpu_stop_fn_t		fn;
	void			*arg;
};

static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	int ret = -ENOENT;

	preempt_disable();
	if (cpu == smp_processor_id())
		ret = fn(arg);
	preempt_enable();
	return ret;
}

static void stop_one_cpu_nowait_workfn(struct work_struct *work)
{
	struct cpu_stop_work *stwork =
		container_of(work, struct cpu_stop_work, work);

	preempt_disable();
	stwork->fn(stwork->arg);
	preempt_enable();
}

static inline bool stop_one_cpu_nowait(unsigned int cpu,
					cpu_stop_fn_t fn, void *arg,
					struct cpu_stop_work *work_buf)
{
	if (cpu == smp_processor_id()) {
		INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
		work_buf->fn = fn;
		work_buf->arg = arg;
		schedule_work(&work_buf->work);
		return true;
	}

	return false;
}

static inline int stop_cpus(const struct cpumask *cpumask,
			    cpu_stop_fn_t fn, void *arg)
{
	if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
		return stop_one_cpu(raw_smp_processor_id(), fn, arg);
	return -ENOENT;
}

static inline int try_stop_cpus(const struct cpumask *cpumask,
				cpu_stop_fn_t fn, void *arg)
{
	return stop_cpus(cpumask, fn, arg);
}

#endif	/* CONFIG_SMP */

/*
 * stop_machine "Bogolock": stop the entire machine, disable interrupts.
 * This is a very heavy lock, which is equivalent to grabbing every
 * spinlock (and more).  So the "read" side of such a lock is anything
 * which disables preemption.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)

/**
 * stop_machine: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * Description: This causes a thread to be scheduled on every cpu,
 * each of which disables interrupts.  The result is that no one is
 * holding a spinlock or inside any other preempt-disabled region when
 * @fn() runs.
 *
 * This can be thought of as a very heavy write lock, equivalent to
 * grabbing every spinlock in the kernel.
 */
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);

int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus);
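
/*
 * Usage sketch (illustrative only; "patch_insn" and "pdata" are
 * hypothetical): a typical caller wants a short, non-sleeping update to
 * happen while every online cpu is spinning with interrupts disabled:
 *
 *	ret = stop_machine(patch_insn, &pdata, NULL);
 *
 * With @cpus == NULL, @fn() runs on one arbitrary online cpu while the
 * others wait with interrupts off; passing a cpumask runs @fn() on every
 * cpu in that mask instead.
 */
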
#else	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */

static inline int stop_machine(cpu_stop_fn_t fn, void *data,
			       const struct cpumask *cpus)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = fn(data);
	local_irq_restore(flags);
	return ret;
}

static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
						 const struct cpumask *cpus)
{
	return stop_machine(fn, data, cpus);
}

#endif	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
#endif	/* _LINUX_STOP_MACHINE */