/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2007 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

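/*
 * Number of CPUs that have come up; smp_call_function() uses this to
 * decide how many IPIs to send.
 */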
static atomic_t cpus_booted = ATOMIC_INIT(0);

/*
 * Run specified function on a particular processor.
 */
void __smp_call_function(unsigned int cpu);

static inline void __init smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	c->loops_per_jiffy = loops_per_jiffy;
}

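/*
 * Runs once on the boot CPU before any secondaries are brought up:
 * set up an MMU context for init_mm and let the platform code do its
 * own pre-boot setup.
 */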
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	plat_prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}

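/*
 * Wire up logical CPU 0 as the booting processor and mark it both
 * online and possible.
 */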
void __devinit smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	cpu_set(cpu, cpu_online_map);
	cpu_set(cpu, cpu_possible_map);
}

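/*
 * Entry point for a secondary CPU, reached from the startup code in
 * head.S: take a reference on init_mm, calibrate the delay loop, and
 * mark this CPU online before dropping into the idle loop.
 */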
asmlinkage void __cpuinit start_secondary(void)
{
	unsigned int cpu;
	struct mm_struct *mm = &init_mm;

	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(mm, current);

	per_cpu_trap_init();

	preempt_disable();

	local_irq_enable();

	calibrate_delay();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);

	cpu_set(cpu, cpu_online_map);

	cpu_idle();
}

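/*
 * Boot parameter block shared with the low-level startup code in
 * head.S; __cpu_up() fills it in before kicking a secondary CPU.
 */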
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *tsk;
	unsigned long timeout;

	tsk = fork_idle(cpu);
	if (IS_ERR(tsk)) {
		printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
		return PTR_ERR(tsk);
	}

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_cache_all();

	plat_start_cpu(cpu, (unsigned long)_stext);

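	/* Give the secondary up to one second (HZ jiffies) to mark itself online. */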
	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

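/*
 * All CPUs are up: print the aggregate BogoMIPS rating.  bogomips is
 * loops_per_jiffy * HZ / 500000, so dividing the sum by (500000/HZ)
 * yields the integer part and (5000/HZ) the two decimal digits.
 */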
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

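/* IPI handler: take this CPU offline and spin with interrupts disabled. */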
static void stop_this_cpu(void *unused)
{
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();

	for (;;)
		cpu_relax();
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

struct smp_fn_call_struct smp_fn_call = {
	.lock = SPIN_LOCK_UNLOCKED,
	.finished = ATOMIC_INIT(0),
};

/*
 * The caller of this wants the passed function to run on every cpu.  If wait
 * is set, wait until all cpus have finished the function before returning.
 * The lock is here to protect the call structure.
 * You must not call this function with interrupts disabled, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
{
	unsigned int nr_cpus = atomic_read(&cpus_booted);
	int i;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	spin_lock(&smp_fn_call.lock);

	atomic_set(&smp_fn_call.finished, 0);
	smp_fn_call.fn = func;
	smp_fn_call.data = info;

	for (i = 0; i < nr_cpus; i++)
		if (i != smp_processor_id())
			plat_send_ipi(i, SMP_MSG_FUNCTION);

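	/*
	 * Each remote CPU increments ->finished once it has run func;
	 * spin until all nr_cpus - 1 others have checked in.
	 */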
	if (wait)
		while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1))
			cpu_relax();

	spin_unlock(&smp_fn_call.lock);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

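/* Broadcast a full TLB flush: run local_flush_tlb_all() on every online CPU. */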
void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and the
 * tlb contexts on other cpus are invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other cpus.
 * For multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

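/* Argument block passed to the TLB flush IPI handlers below. */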
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

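/* Kernel mappings are shared by every CPU, so this always broadcasts. */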
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
	} else {
		int i;
		for (i = 0; i < num_online_cpus(); i++)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

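/* fd.addr1/addr2 double up as the (asid, vaddr) pair for flush_tlb_one(). */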
static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
	local_flush_tlb_one(asid, vaddr);
}