/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>

#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

/*
 * This was written with the Sega Saturn (SMP SH-2 7604) in mind,
 * but is designed to be usable regardless of whether an MMU is
 * present or not.
 */
struct sh_cpuinfo cpu_data[NR_CPUS];

extern void per_cpu_trap_init(void);

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
static atomic_t cpus_booted = ATOMIC_INIT(0);

/* These are defined by the board-specific code. */

/*
 * Send an IPI carrying the given action (e.g. SMP_MSG_RESCHEDULE) to
 * the passed cpu.
 */
void __smp_send_ipi(unsigned int cpu, unsigned int action);

/*
 * Find the number of available processors.
 */
unsigned int __smp_probe_cpus(void);

/*
 * Start a particular processor.
 */
void __smp_slave_init(unsigned int cpu);

/*
 * Run specified function on a particular processor.
 */
void __smp_call_function(unsigned int cpu);

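/*
 * Record this CPU's delay-loop calibration in the per-cpu info array.
 */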
static inline void __init smp_store_cpu_info(unsigned int cpu)
{
	cpu_data[cpu].loops_per_jiffy = loops_per_jiffy;
}

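/*
 * Count the boot CPU as booted, record its info, and mark every CPU
 * reported by the board probe as possible.
 */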
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();
	int i;

	atomic_set(&cpus_booted, 1);
	smp_store_cpu_info(cpu);

	for (i = 0; i < __smp_probe_cpus(); i++)
		cpu_set(i, cpu_possible_map);
}

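/*
 * Mark the CPU we are booting on as online and possible.
 */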
void __devinit smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_set(cpu, cpu_online_map);
	cpu_set(cpu, cpu_possible_map);
}

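/*
 * Bring up a secondary CPU: fork its idle task and mark it online.
 */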
int __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;

	tsk = fork_idle(cpu);

	if (IS_ERR(tsk))
		panic("Failed forking idle task for cpu %d\n", cpu);

	task_thread_info(tsk)->cpu = cpu;

	cpu_set(cpu, cpu_online_map);

	return 0;
}

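/*
 * Entry point for secondary CPUs: adopt init_mm, run the board-specific
 * slave initialisation, then enter the idle loop.
 */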
int start_secondary(void *unused)
{
	unsigned int cpu;

	cpu = smp_processor_id();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);

	__smp_slave_init(cpu);
	preempt_disable();
	per_cpu_trap_init();

	atomic_inc(&cpus_booted);

	cpu_idle();
	return 0;
}

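/*
 * Called once all CPUs have been brought up; nothing to do here beyond
 * a memory barrier.
 */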
void __init smp_cpus_done(unsigned int max_cpus)
{
	smp_mb();
}

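/*
 * Poke the given CPU with a reschedule IPI.
 */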
void smp_send_reschedule(int cpu)
{
	__smp_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

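/*
 * Take the calling CPU offline and spin with interrupts disabled.
 */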
static void stop_this_cpu(void *unused)
{
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();

	for (;;)
		cpu_relax();
}

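/*
 * Ask all other CPUs to stop themselves.
 */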
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

struct smp_fn_call_struct smp_fn_call = {
	.lock = SPIN_LOCK_UNLOCKED,
	.finished = ATOMIC_INIT(0),
};

/*
 * Run the passed function on every other booted cpu.  If wait is set,
 * wait until all cpus have finished the function before returning.
 * The lock protects the call structure.
 *
 * You must not call this function with interrupts disabled, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
{
	unsigned int nr_cpus = atomic_read(&cpus_booted);
	int i;

	if (nr_cpus < 2)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock(&smp_fn_call.lock);

	atomic_set(&smp_fn_call.finished, 0);
	smp_fn_call.fn = func;
	smp_fn_call.data = info;

	for (i = 0; i < nr_cpus; i++)
		if (i != smp_processor_id())
			__smp_call_function(i);

	if (wait)
		while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1))
			cpu_relax();

	spin_unlock(&smp_fn_call.lock);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}