/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/cpufreq.h>
#include <linux/irq_work.h>

#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
#include <asm/mpu.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * control for which core is the next to come out of the secondary
 * boot "holding pen"
 */
volatile int pen_release = -1;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_TIMER,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_COMPLETION,
};

static DECLARE_COMPLETION(cpu_running);

static struct smp_operations smp_ops;

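/*
 * Install the platform's SMP operations.  In kernels of this vintage the
 * call is typically made early from setup_arch(), passing either the
 * machine descriptor's ops or the PSCI ops.
 */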
void __init smp_set_ops(struct smp_operations *ops)
{
	if (ops)
		smp_ops = *ops;
}

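/*
 * Pack the physical address of a page-table directory into the form the
 * secondary-boot code expects: translate via the identity map, check the
 * required alignment, and shift the value down.  The shift matters with
 * LPAE, where a physical address can exceed 32 bits.
 */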
static unsigned long get_arch_pgd(pgd_t *pgd)
{
	phys_addr_t pgdir = virt_to_idmap(pgd);
	BUG_ON(pgdir & ARCH_PGD_MASK);
	return pgdir >> ARCH_PGD_SHIFT;
}

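/*
 * Boot a secondary CPU.  The handshake with secondary_start_kernel() goes
 * through two shared objects: secondary_data, which tells the new CPU
 * where its stack and page tables live, and the cpu_running completion,
 * which the secondary signals once it has marked itself online.
 */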
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
#endif

#ifdef CONFIG_MMU
	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
#endif
	sync_cache_w(&secondary_data);

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
}

/* platform specific SMP operations */
void __init smp_init_cpus(void)
{
	if (smp_ops.smp_init_cpus)
		smp_ops.smp_init_cpus();
}

int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (smp_ops.smp_boot_secondary)
		return smp_ops.smp_boot_secondary(cpu, idle);
	return -ENOSYS;
}

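/*
 * Report whether this platform supports CPU hotplug.  The presence of a
 * cpu_kill hook is taken as the indicator that offline CPUs can really
 * be disposed of.
 */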
int platform_can_cpu_hotplug(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (smp_ops.cpu_kill)
		return 1;
#endif

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static int platform_cpu_kill(unsigned int cpu)
{
	if (smp_ops.cpu_kill)
		return smp_ops.cpu_kill(cpu);
	return 1;
}

static int platform_cpu_disable(unsigned int cpu)
{
	if (smp_ops.cpu_disable)
		return smp_ops.cpu_disable(cpu);

	/*
	 * By default, allow disabling all CPUs except the first one,
	 * since this is special on a lot of platforms, e.g. because
	 * of clock tick interrupts.
	 */
	return cpu == 0 ? -EPERM : 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 *
	 * Caches are flushed to the Level of Unification Inner Shareable
	 * to write-back dirty lines to unified caches shared by all CPUs.
	 */
	flush_cache_louis();
	local_flush_tlb_all();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}

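/*
 * Signalled by the dying CPU in cpu_die() and waited on by __cpu_die()
 * on the CPU that requested the shutdown.
 */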
static DECLARE_COMPLETION(cpu_died);

/*
 * called on the thread which is asking for a CPU to be shut down -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
		pr_err("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	/*
	 * platform_cpu_kill() is generally expected to do the powering off
	 * and/or cutting of clocks to the dying CPU.  Optionally, this may
	 * be done by the CPU which is dying in preference to supporting
	 * this call, but that means there is _no_ synchronisation between
	 * the requesting CPU and the dying CPU actually losing power.
	 */
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/*
	 * Flush the data out of the L1 cache for this CPU.  This must be
	 * before the completion to ensure that data is safely written out
	 * before platform_cpu_kill() gets called - which may disable
	 * *this* CPU and power down its cache.
	 */
	flush_cache_louis();

	/*
	 * Tell __cpu_die() that this CPU is now safe to dispose of.  Once
	 * this returns, power and/or clocks can be removed at any point
	 * from this CPU and its cache by platform_cpu_kill().
	 */
	complete(&cpu_died);

	/*
	 * Ensure that the cache lines associated with that completion are
	 * written out.  This covers the case where _this_ CPU is doing the
	 * powering down, to ensure that the completion is visible to the
	 * CPU waiting for this one.
	 */
	flush_cache_louis();

	/*
	 * The actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.  This may remove power, or it may simply spin.
	 *
	 * Platforms are generally expected *NOT* to return from this call,
	 * although there are some which do because they have no way to
	 * power down the CPU.  These platforms are the _only_ reason we
	 * have a return path which uses the fragment of assembly below.
	 *
	 * The return path should not be used for platforms which can
	 * power off the CPU.
	 */
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);

	pr_warn("CPU%u: smp_ops.cpu_die() returned, trying to resuscitate\n",
		cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static void smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
	cpu_info->cpuid = read_cpuid_id();

	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	/*
	 * The identity mapping is uncached (strongly ordered), so
	 * switch away from it before attempting any exclusive accesses.
	 */
	cpu_switch_mm(mm->pgd, mm);
	local_flush_bp_all();
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	cpu = smp_processor_id();
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	cpu_init();

	printk("CPU%u: Booted secondary processor\n", cpu);

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	if (smp_ops.smp_secondary_init)
		smp_ops.smp_secondary_init(cpu);

	notify_cpu_starting(cpu);

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue - which happens after __cpu_up returns.
	 */
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_fiq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	printk(KERN_INFO "SMP: Total of %d processors activated.\n",
	       num_online_cpus());

	hyp_mode_check();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;
	if (ncores > 1 && max_cpus) {
		/*
		 * Initialise the present map, which describes the set of CPUs
		 * actually populated at the present time.  A platform should
		 * re-initialize the map in the platform's smp_prepare_cpus()
		 * if present != possible (e.g. physical hotplug).
		 */
		init_cpu_present(cpu_possible_mask);

		/*
		 * Initialise the SCU if there is more than one CPU
		 * and let them know where to start.
		 */
		if (smp_ops.smp_prepare_cpus)
			smp_ops.smp_prepare_cpus(max_cpus);
	}
}

static void (*__smp_cross_call)(const struct cpumask *, unsigned int);

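/*
 * Record the low-level IPI trigger.  Only the first registration wins;
 * in practice the interrupt controller driver (the GIC, for instance)
 * installs its softirq-raising routine here during initialisation.
 */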
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	if (!__smp_cross_call)
		__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_WAKEUP, "CPU wakeup interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_COMPLETION, "completion interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));

		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (is_smp())
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

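/*
 * IPI-triggered completion: a waiter registers a completion for a target
 * CPU and sends the returned IPI number; the target's IPI handler then
 * completes it.  Used, for example, by the big.LITTLE switcher.
 */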
static DEFINE_PER_CPU(struct completion *, cpu_completion);

int register_ipi_completion(struct completion *completion, int cpu)
{
	per_cpu(cpu_completion, cpu) = completion;
	return IPI_COMPLETION;
}

static void ipi_complete(unsigned int cpu)
{
	complete(per_cpu(cpu_completion, cpu));
}

/*
 * Main handler for inter-processor interrupts
 */
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	handle_IPI(ipinr, regs);
}

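/*
 * Demultiplex an incoming IPI: account it in the per-CPU statistics,
 * emit the entry/exit tracepoints, and dispatch on the message type.
 * Entered either via do_IPI() above or directly from the interrupt
 * controller's handler.
 */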
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_WAKEUP:
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CALL_FUNC_SINGLE:
		irq_enter();
		generic_smp_call_function_single_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

	case IPI_COMPLETION:
		irq_enter();
		ipi_complete(cpu);
		irq_exit();
		break;

	default:
		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
		       cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	if (!cpumask_empty(&mask))
		smp_cross_call(&mask, IPI_CPU_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

#ifdef CONFIG_CPU_FREQ

static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
static unsigned long global_l_p_j_ref;
static unsigned long global_l_p_j_ref_freq;

static int cpufreq_callback(struct notifier_block *nb,
			    unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	int cpu = freq->cpu;

	if (freq->flags & CPUFREQ_CONST_LOOPS)
		return NOTIFY_OK;

	if (!per_cpu(l_p_j_ref, cpu)) {
		per_cpu(l_p_j_ref, cpu) =
			per_cpu(cpu_data, cpu).loops_per_jiffy;
		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
		if (!global_l_p_j_ref) {
			global_l_p_j_ref = loops_per_jiffy;
			global_l_p_j_ref_freq = freq->old;
		}
	}

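	/*
	 * Rescale loops_per_jiffy before a frequency increase and after a
	 * frequency decrease, so that during the transition the value is
	 * never too small and udelay() never undershoots.
	 */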
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
						global_l_p_j_ref_freq,
						freq->new);
		per_cpu(cpu_data, cpu).loops_per_jiffy =
			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
				      per_cpu(l_p_j_ref_freq, cpu),
				      freq->new);
	}
	return NOTIFY_OK;
}

static struct notifier_block cpufreq_notifier = {
	.notifier_call  = cpufreq_callback,
};

static int __init register_cpufreq_notifier(void)
{
	return cpufreq_register_notifier(&cpufreq_notifier,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

#endif