/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static LIST_HEAD(call_function_queue);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);

enum {
        CSD_FLAG_WAIT   = 0x01,         /* caller spins until the callee clears it */
        CSD_FLAG_ALLOC  = 0x02,         /* data was kmalloc'ed; free it after use */
};

struct call_function_data {
        struct call_single_data csd;
        spinlock_t lock;
        unsigned int refs;              /* CPUs that still need to run csd.func */
        cpumask_t cpumask;              /* CPUs targeted by this call */
        struct rcu_head rcu_head;
};

struct call_single_queue {
        struct list_head list;
        spinlock_t lock;
};

static int __cpuinit init_call_single_data(void)
{
        int i;

        for_each_possible_cpu(i) {
                struct call_single_queue *q = &per_cpu(call_single_queue, i);

                spin_lock_init(&q->lock);
                INIT_LIST_HEAD(&q->list);
        }
        return 0;
}
early_initcall(init_call_single_data);

static void csd_flag_wait(struct call_single_data *data)
{
        /* Wait for response */
        do {
                /*
                 * We need to see the store to ->flags from the IPI
                 * handler; the barrier pairs with the smp_wmb() done
                 * there before CSD_FLAG_WAIT is cleared.
                 */
                smp_mb();
                if (!(data->flags & CSD_FLAG_WAIT))
                        break;
                cpu_relax();
        } while (1);
}

/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
        struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
        int wait = data->flags & CSD_FLAG_WAIT, ipi;
        unsigned long flags;

        spin_lock_irqsave(&dst->lock, flags);
        ipi = list_empty(&dst->list);
        list_add_tail(&data->list, &dst->list);
        spin_unlock_irqrestore(&dst->lock, flags);

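        /*
         * Only send the IPI if the queue was empty: if it was not, an
         * earlier sender has already raised the IPI and the target has
         * not drained its queue yet, so the running handler will also
         * pick up this new entry.
         */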
        if (ipi)
                arch_send_call_function_single_ipi(cpu);

        if (wait)
                csd_flag_wait(data);
}

static void rcu_free_call_data(struct rcu_head *head)
{
        struct call_function_data *data;

        data = container_of(head, struct call_function_data, rcu_head);

        kfree(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
        struct call_function_data *data;
        int cpu = get_cpu();

        /*
         * It's ok to use list_for_each_entry_rcu() here even though we may
         * delete 'data', since list_del_rcu() doesn't clear ->next
         */
        rcu_read_lock();
        list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
                int refs;

                if (!cpu_isset(cpu, data->cpumask))
                        continue;

                data->csd.func(data->csd.info);

                spin_lock(&data->lock);
                cpu_clear(cpu, data->cpumask);
                WARN_ON(data->refs == 0);
                data->refs--;
                refs = data->refs;
                spin_unlock(&data->lock);

                if (refs)
                        continue;

                /*
                 * We were the last CPU to run the callback: unlink the
                 * element, release a waiting sender and free dynamically
                 * allocated data once all RCU readers are done with it.
                 */
                spin_lock(&call_function_lock);
                list_del_rcu(&data->csd.list);
                spin_unlock(&call_function_lock);

                if (data->csd.flags & CSD_FLAG_WAIT) {
                        /*
                         * serialize stores to data with the flag clear
                         * and wakeup
                         */
                        smp_wmb();
                        data->csd.flags &= ~CSD_FLAG_WAIT;
                }
                if (data->csd.flags & CSD_FLAG_ALLOC)
                        call_rcu(&data->rcu_head, rcu_free_call_data);
        }
        rcu_read_unlock();

        put_cpu();
}
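
/*
 * Example flow (illustrative only): smp_call_function_mask() with two
 * target CPUs queues a single call_function_data with refs == 2. Each
 * target runs the handler above, calls ->func and drops one reference;
 * whichever finishes last unlinks the element, clears CSD_FLAG_WAIT for
 * a spinning sender and hands CSD_FLAG_ALLOC data to call_rcu().
 */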

/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        struct call_single_queue *q = &__get_cpu_var(call_single_queue);
        LIST_HEAD(list);

        /*
         * Need to see other stores to list head for checking whether
         * list is empty without holding q->lock
         */
        smp_mb();
        while (!list_empty(&q->list)) {
                unsigned int data_flags;

                spin_lock(&q->lock);
                list_replace_init(&q->list, &list);
                spin_unlock(&q->lock);

                while (!list_empty(&list)) {
                        struct call_single_data *data;

                        data = list_entry(list.next, struct call_single_data,
                                          list);
                        list_del(&data->list);

                        /*
                         * 'data' can be invalid after this call if its
                         * flags are zero (when called through
                         * generic_exec_single()), so save them away before
                         * making the call.
                         */
                        data_flags = data->flags;

                        data->func(data->info);

                        if (data_flags & CSD_FLAG_WAIT) {
                                smp_wmb();
                                data->flags &= ~CSD_FLAG_WAIT;
                        } else if (data_flags & CSD_FLAG_ALLOC)
                                kfree(data);
                }
                /*
                 * See comment on outer loop
                 */
                smp_mb();
        }
}
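
/*
 * Note that the handler above takes the whole pending list in one
 * list_replace_init() and then works on that private copy, so q->lock
 * is only held briefly and other CPUs can keep queueing new requests
 * while earlier ones run.
 */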

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
                             int wait)
{
        struct call_single_data d;
        unsigned long flags;
        /* prevent preemption and reschedule on another processor */
        int me = get_cpu();

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        if (cpu == me) {
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        } else {
                struct call_single_data *data = NULL;

                if (!wait) {
                        data = kmalloc(sizeof(*data), GFP_ATOMIC);
                        if (data)
                                data->flags = CSD_FLAG_ALLOC;
                }
                if (!data) {
                        data = &d;
                        data->flags = CSD_FLAG_WAIT;
                }

                data->func = func;
                data->info = info;
                generic_exec_single(cpu, data);
        }

        put_cpu();
        return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
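
/*
 * Usage sketch (illustrative only; remote_stat_fn and the CPU number
 * are made-up names, not part of this file):
 *
 *      static void remote_stat_fn(void *info)
 *      {
 *              ... runs on CPU 2, from the IPI handler ...
 *      }
 *
 *      err = smp_call_function_single(2, remote_stat_fn, stats, 1);
 *
 * With @wait set, the caller spins via CSD_FLAG_WAIT until CPU 2 has
 * run the callback; with @wait clear it returns as soon as the request
 * is queued and the IPI has been sent.
 */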

/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
        /* Can deadlock when called with interrupts disabled */
        WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

        generic_exec_single(cpu, data);
}
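
/*
 * Sketch of the embedded-@data pattern mentioned above (illustrative
 * only; struct my_req and my_req_done() are hypothetical):
 *
 *      struct my_req {
 *              struct call_single_data csd;
 *              ...
 *      };
 *
 *      req->csd.flags = 0;
 *      req->csd.func = my_req_done;
 *      req->csd.info = req;
 *      __smp_call_function_single(target_cpu, &req->csd);
 *
 * With flags == 0 the IPI handler neither wakes a waiter nor frees the
 * element, so the request must stay valid until my_req_done() has run.
 */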

/* Dummy function */
static void quiesce_dummy(void *unused)
{
}

/*
 * Ensure stack based data used in call function mask is safe to free.
 *
 * This is needed by smp_call_function_mask when using on-stack data, because
 * a single call function queue is shared by all CPUs, and any CPU may pick up
 * the data item on the queue at any time before it is deleted. So we need to
 * ensure that all CPUs have transitioned through a quiescent state after
 * this call.
 *
 * This is a very slow function, implemented by sending synchronous IPIs to
 * every CPU in the mask. For this reason, we have to alloc data rather than
 * use stack based data even in the case of synchronous calls. The stack based
 * data is then just used for deadlock/oom fallback which will be very rare.
 *
 * If a faster scheme can be made, we could go back to preferring stack based
 * data -- the data allocation/free is non-zero cost.
 */
static void smp_call_function_mask_quiesce_stack(cpumask_t mask)
{
        struct call_single_data data;
        int cpu;

        data.func = quiesce_dummy;
        data.info = NULL;

        for_each_cpu_mask(cpu, mask) {
                data.flags = CSD_FLAG_WAIT;
                generic_exec_single(cpu, &data);
        }
}

/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
                           int wait)
{
        struct call_function_data d;
        struct call_function_data *data = NULL;
        cpumask_t allbutself;
        unsigned long flags;
        int cpu, num_cpus;
        int slowpath = 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        cpu = smp_processor_id();
        allbutself = cpu_online_map;
        cpu_clear(cpu, allbutself);
        cpus_and(mask, mask, allbutself);
        num_cpus = cpus_weight(mask);

        /*
         * If zero CPUs, return. If just a single CPU, turn this request
         * into a targeted single call instead since it's faster.
         */
        if (!num_cpus)
                return 0;
        else if (num_cpus == 1) {
                cpu = first_cpu(mask);
                return smp_call_function_single(cpu, func, info, wait);
        }

        data = kmalloc(sizeof(*data), GFP_ATOMIC);
        if (data) {
                data->csd.flags = CSD_FLAG_ALLOC;
                if (wait)
                        data->csd.flags |= CSD_FLAG_WAIT;
        } else {
                data = &d;
                data->csd.flags = CSD_FLAG_WAIT;
                wait = 1;
                slowpath = 1;
        }

        spin_lock_init(&data->lock);
        data->csd.func = func;
        data->csd.info = info;
        data->refs = num_cpus;
        data->cpumask = mask;

        spin_lock_irqsave(&call_function_lock, flags);
        list_add_tail_rcu(&data->csd.list, &call_function_queue);
        spin_unlock_irqrestore(&call_function_lock, flags);

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi(mask);

        /* optionally wait for the CPUs to complete */
        if (wait) {
                csd_flag_wait(&data->csd);
                if (unlikely(slowpath))
                        smp_call_function_mask_quiesce_stack(mask);
        }

        return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);
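
/*
 * Usage sketch (illustrative only; flush_tlb_fn is a made-up callback):
 *
 *      preempt_disable();
 *      smp_call_function_mask(mm->cpu_vm_mask, flush_tlb_fn, mm, 1);
 *      preempt_enable();
 *
 * The function itself strips the calling CPU and offline CPUs from
 * @mask, so passing a superset is fine; @func just never runs locally.
 */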

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
        int ret;

        preempt_disable();
        ret = smp_call_function_mask(cpu_online_map, func, info, wait);
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(smp_call_function);
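
/*
 * Usage sketch (illustrative only; do_flush() is a made-up callback):
 *
 *      smp_call_function(do_flush, NULL, 1);
 *
 * runs do_flush() on every other online CPU and spins until all of them
 * have finished; the local CPU must call do_flush() itself if needed.
 */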

/*
 * The helpers below just wrap call_function_lock; architecture code can
 * take it to hold off call function IPI senders, for instance while a
 * CPU is being brought online.
 */
void ipi_call_lock(void)
{
        spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
        spin_unlock(&call_function_lock);
}

void ipi_call_lock_irq(void)
{
        spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
        spin_unlock_irq(&call_function_lock);
}