smp: quit unconditionally enabling irq in on_each_cpu_mask and on_each_cpu_cond
[deliverable/linux.git] / kernel / up.c
CommitLineData
53ce3d95
AM
1/*
2 * Uniprocessor-only support functions. The counterpart to kernel/smp.c
3 */
4
6e962814 5#include <linux/interrupt.h>
53ce3d95 6#include <linux/kernel.h>
9984de1a 7#include <linux/export.h>
53ce3d95
AM
8#include <linux/smp.h>
9
/*
 * smp_call_function_single - run @func(@info) on a CPU; UP stub.
 *
 * On a uniprocessor kernel the only CPU is 0, so warn if a caller asks
 * for any other CPU and then just run @func locally.  @wait is
 * meaningless here: the call is always synchronous.  Always returns 0.
 *
 * Use local_irq_save()/local_irq_restore() instead of
 * local_irq_disable()/local_irq_enable(): a caller may legitimately
 * invoke this with interrupts already disabled, and unconditionally
 * re-enabling them behind its back would be a bug.  This also matches
 * on_each_cpu_mask()/on_each_cpu_cond() below.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long flags;

	WARN_ON(cpu != 0);

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
fa688207
DD
22
23/*
24 * Note we still need to test the mask even for UP
25 * because we actually can get an empty mask from
26 * code that on SMP might call us without the local
27 * CPU in the mask.
28 */
29void on_each_cpu_mask(const struct cpumask *mask,
30 smp_call_func_t func, void *info, bool wait)
31{
32 unsigned long flags;
33
34 if (cpumask_test_cpu(0, mask)) {
35 local_irq_save(flags);
36 func(info);
37 local_irq_restore(flags);
38 }
39}
40EXPORT_SYMBOL(on_each_cpu_mask);
41
42/*
43 * Preemption is disabled here to make sure the cond_func is called under the
44 * same condtions in UP and SMP.
45 */
46void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
47 smp_call_func_t func, void *info, bool wait,
48 gfp_t gfp_flags)
49{
50 unsigned long flags;
51
52 preempt_disable();
53 if (cond_func(0, info)) {
54 local_irq_save(flags);
55 func(info);
56 local_irq_restore(flags);
57 }
58 preempt_enable();
59}
60EXPORT_SYMBOL(on_each_cpu_cond);
This page took 0.296958 seconds and 5 git commands to generate.