/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

EXPORT_SYMBOL(phys_cpu_present_map);
EXPORT_SYMBOL(cpu_online_map);
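
/*
 * Added note (not in the original source): the two maps above are
 * inverses of each other.  If, say, only physical CPUs 0 and 2 come up,
 * a plausible assignment would be
 *
 *	__cpu_number_map[0] == 0, __cpu_number_map[2] == 1
 *	__cpu_logical_map[0] == 0, __cpu_logical_map[1] == 2
 *
 * so that __cpu_logical_map[__cpu_number_map[p]] == p holds for every
 * present physical CPU p.
 */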

extern void __init calibrate_delay(void);
extern void cpu_idle(void);

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	mips_clockevent_init();
	prom_init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	prom_smp_finish();

	cpu_set(cpu, cpu_callin_map);

	cpu_idle();
}

DEFINE_SPINLOCK(smp_call_lock);

struct call_data_struct *call_data;

/*
 * Run a function on all other CPUs.
 *
 *  <mask>	cpumask_t of all processors to run the function on.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 *
 * You must not call this function with interrupts disabled, from a
 * hardware interrupt handler, or from a bottom half handler:
 *
 * CPU A                               CPU B
 * Disable interrupts
 *                                     smp_call_function()
 *                                     Take call_lock
 *                                     Send IPIs
 *                                     Wait for all cpus to acknowledge IPI
 *                                     CPU A has not responded, spin waiting
 *                                     for cpu A to respond, holding call_lock
 * smp_call_function()
 * Spin waiting for call_lock
 * Deadlock                            Deadlock
 */
int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
	void *info, int retry, int wait)
{
	struct call_data_struct data;
	int cpu = smp_processor_id();
	int cpus;

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	BUG_ON(!cpu_online(cpu));

	cpu_clear(cpu, mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&smp_call_lock);
	call_data = &data;
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
	core_send_ipi_mask(mask, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	call_data = NULL;
	spin_unlock(&smp_call_lock);

	return 0;
}
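
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * in process context, with interrupts enabled, might drain some
 * per-CPU state on every other online CPU like this.  The helper name
 * drain_remote_state() is hypothetical.
 *
 *	static void drain_remote_state(void *info)
 *	{
 *		// Runs in interrupt context on each target CPU;
 *		// must be fast and must not sleep.
 *	}
 *
 *	smp_call_function_mask(cpu_online_map, drain_remote_state, NULL, 0, 1);
 *
 * Passing wait == 1 makes it safe to keep <info> on the caller's stack
 * for the duration of the call.
 */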

int smp_call_function(void (*func) (void *info), void *info, int retry,
	int wait)
{
	return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
}

void smp_call_function_interrupt(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	smp_mb();
	atomic_inc(&call_data->started);

	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	irq_enter();
	(*func)(info);
	irq_exit();

	if (wait) {
		smp_mb();
		atomic_inc(&call_data->finished);
	}
}
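
/*
 * Worked ordering example (added note, not in the original): with
 * wait == 1 and two target CPUs the handshake proceeds roughly as
 *
 *	initiator				targets
 *	call_data = &data; smp_mb();
 *	core_send_ipi_mask()
 *						smp_mb(); started: 0 -> 1 -> 2
 *	sees started == 2
 *						run func(); finished: 0 -> 1 -> 2
 *	sees finished == 2
 *	call_data = NULL; unlock
 *
 * The smp_mb() pairs ensure each side sees the other's writes to
 * call_data and <info> before acting on the counters.
 */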

int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int retry, int wait)
{
	int ret, me;

	/*
	 * Can die spectacularly if this CPU isn't yet marked online
	 */
	if (!cpu_online(cpu))
		return 0;

	me = get_cpu();
	BUG_ON(!cpu_online(me));

	if (cpu == me) {
		local_irq_disable();
		func(info);
		local_irq_enable();
		put_cpu();
		return 0;
	}

	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
				     wait);

	put_cpu();
	return ret;
}
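
/*
 * Usage sketch (illustrative, not from the original file): reading the
 * CP0 count register on one particular CPU.  read_remote_count() and
 * the target CPU number are hypothetical.
 *
 *	static void read_remote_count(void *info)
 *	{
 *		*(unsigned int *)info = read_c0_count();
 *	}
 *
 *	unsigned int count;
 *	smp_call_function_single(2, read_remote_count, &count, 0, 1);
 *
 * wait == 1 is required here since <info> points at the caller's stack.
 */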

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_enable();	/* May need to service _machine_restart IPI */
	for (;;);		/* Spin until the machine is reset or powered off */
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 1, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	prom_cpus_done();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	plat_prepare_cpus(max_cpus);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	/*
	 * This assumes that bootup is always handled by the processor
	 * with the logical and physical number 0.
	 */
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	cpu_set(0, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * Processor goes to start_secondary(), sets online flag
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("Fork failed for CPU %d", cpu);

	prom_boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}
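
/*
 * Added note (not in the original): smp_on_other_tlbs() runs <func>
 * only on the remote TLBs, so callers that also need the local flush
 * do it themselves (see flush_tlb_mm() below).  smp_on_each_tlb()
 * wraps both steps, with preemption disabled so the current CPU cannot
 * change between the remote call and the local one (see
 * flush_tlb_one() at the bottom of this file).
 */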

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}
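
/*
 * Added note (not in the original): kernel mappings have no per-mm
 * context that could be lazily invalidated, so flush_tlb_kernel_range()
 * must IPI every online CPU unconditionally.  Passing wait == 1 to
 * on_each_cpu() keeps the stack-allocated flush_tlb_data valid until
 * all CPUs have consumed it.
 */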

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);