arch/mips/kernel/smp.c
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cpc.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

cpumask_t cpu_callin_map;  /* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];  /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];  /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

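/*
 * Record this CPU as a sibling of every already set-up CPU that shares its
 * package and core; on single-threaded cores the CPU is its own only sibling.
 */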
static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu(i, &cpu_sibling_setup_map) {
                        if (cpu_data[cpu].package == cpu_data[i].package &&
                            cpu_data[cpu].core == cpu_data[i].core) {
                                cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
                                cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                        }
                }
        } else
                cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

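/*
 * Mark this CPU and every already set-up CPU in the same physical package
 * as sharing a core map, updating both CPUs' masks.
 */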
static inline void set_cpu_core_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_core_setup_map);

        for_each_cpu(i, &cpu_core_setup_map) {
                if (cpu_data[cpu].package == cpu_data[i].package) {
                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
                }
        }
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
static inline void calculate_cpu_foreign_map(void)
{
        int i, k, core_present;
        cpumask_t temp_foreign_map;

        /* Re-calculate the mask */
        cpumask_clear(&temp_foreign_map);
        for_each_online_cpu(i) {
                core_present = 0;
                for_each_cpu(k, &temp_foreign_map)
                        if (cpu_data[i].package == cpu_data[k].package &&
                            cpu_data[i].core == cpu_data[k].core)
                                core_present = 1;
                if (!core_present)
                        cpumask_set_cpu(i, &temp_foreign_map);
        }

        cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
}

struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
        mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

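/*
 * Send the requested IPI to every CPU in the mask. When the CPC is present,
 * any target core that has not yet reached coherence (i.e. is not in
 * cpu_coherent_mask) is additionally sent a power-up command so that it can
 * come up and receive the interrupt.
 */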
void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        unsigned long flags;
        unsigned int core;
        int cpu;

        local_irq_save(flags);

        switch (action) {
        case SMP_CALL_FUNCTION:
                __ipi_send_mask(call_desc, mask);
                break;

        case SMP_RESCHEDULE_YOURSELF:
                __ipi_send_mask(sched_desc, mask);
                break;

        default:
                BUG();
        }

        if (mips_cpc_present()) {
                for_each_cpu(cpu, mask) {
                        core = cpu_data[cpu].core;

                        if (core == current_cpu_data.core)
                                continue;

                        while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
                                mips_cpc_lock_other(core);
                                write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
                                mips_cpc_unlock_other();
                        }
                }
        }

        local_irq_restore(flags);
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();

        return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
        generic_smp_call_function_interrupt();

        return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
        .handler = ipi_resched_interrupt,
        .flags = IRQF_PERCPU,
        .name = "IPI resched"
};

static struct irqaction irq_call = {
        .handler = ipi_call_interrupt,
        .flags = IRQF_PERCPU,
        .name = "IPI call"
};

static __init void smp_ipi_init_one(unsigned int virq,
                                    struct irqaction *action)
{
        int ret;

        irq_set_handler(virq, handle_percpu_irq);
        ret = setup_irq(virq, action);
        BUG_ON(ret);
}

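/*
 * Reserve and wire up the call-function and reschedule IPIs from the IPI IRQ
 * domain. Domains that allocate one virq per CPU get one setup_irq() call per
 * possible CPU; otherwise a single virq serves all CPUs.
 */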
static int __init mips_smp_ipi_init(void)
{
        unsigned int call_virq, sched_virq;
        struct irq_domain *ipidomain;
        struct device_node *node;
        /*
         * In some cases like qemu-malta, it is desired to try SMP with
         * a single core. Qemu-malta has no GIC, so an attempt to set any IPIs
         * would cause a BUG_ON() to be triggered since there's no ipidomain.
         *
         * Since IPIs aren't really required for a single core system, skip
         * the initialisation, which should generally keep any such
         * configurations happy and only fail hard when trying to truly run
         * SMP.
         */
        if (cpumask_weight(cpu_possible_mask) == 1)
                return 0;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * Some platforms have half DT setup. So if we found irq node but
         * didn't find an ipidomain, try to search for one that is not in the
         * DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        BUG_ON(!ipidomain);

        call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
        BUG_ON(!call_virq);

        sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
        BUG_ON(!sched_virq);

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, cpu_possible_mask) {
                        smp_ipi_init_one(call_virq + cpu, &irq_call);
                        smp_ipi_init_one(sched_virq + cpu, &irq_resched);
                }
        } else {
                smp_ipi_init_one(call_virq, &irq_call);
                smp_ipi_init_one(sched_virq, &irq_resched);
        }

        call_desc = irq_to_desc(call_virq);
        sched_desc = irq_to_desc(sched_virq);

        return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
        unsigned int cpu;

        cpu_probe();
        per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();
        cpu_report();
        maar_init();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */

        calibrate_delay();
        preempt_disable();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);

        calculate_cpu_foreign_map();

        cpumask_set_cpu(cpu, &cpu_callin_map);

        synchronise_count_slave(cpu);

        /*
         * irq will be enabled in ->smp_finish(), enabling it too early
         * is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
        mp_ops->smp_finish();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU. Be a bit slow here and
         * set the bits for every online CPU so we don't miss
         * any IPI whilst taking this VPE down.
         */

        cpumask_copy(&cpu_foreign_map, cpu_online_mask);

        /* Make it visible to every other CPU */
        smp_mb();

        set_cpu_online(smp_processor_id(), false);
        calculate_cpu_foreign_map();
        local_irq_disable();
        while (1);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
        calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
        cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
        cpumask_set_cpu(0, &cpu_callin_map);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        mp_ops->boot_secondary(cpu, tidle);

        /*
         * Trust is futile. We should really have timeouts ...
         */
        while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
                udelay(100);
                schedule();
        }

        synchronise_count_master(cpu);
        return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 * o No return value
 * o collapses to normal function call on UP kernels
 * o collapses to normal function call on systems with a single shared
 *   primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
        smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * contexts on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

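/*
 * If the mm may be live on other CPUs, flush the range there via IPI;
 * otherwise just invalidate the other CPUs' contexts for this mm (see the
 * comment above flush_tlb_mm) and flush locally.
 */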
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

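/*
 * Kernel mappings are shared by all CPUs, so the range is flushed on every
 * online CPU, including the caller.
 */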
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                cpu_context(cpu, vma->vm_mm) = 0;
                }
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
        int i;
        int cpu = smp_processor_id();

        dump_ipi_function_ptr = dump_ipi_callback;
        smp_mb();
        for_each_online_cpu(i)
                if (i != cpu)
                        mp_ops->send_ipi_single(i, SMP_DUMP);
}
EXPORT_SYMBOL(dump_send_ipi);
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

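/*
 * Kick a broadcast tick to each CPU in the mask. The per-cpu count ensures
 * only one call_single_data is in flight per CPU at a time; the callee
 * resets it once the broadcast has been handled.
 */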
void tick_broadcast(const struct cpumask *mask)
{
        atomic_t *count;
        struct call_single_data *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                count = &per_cpu(tick_broadcast_count, cpu);
                csd = &per_cpu(tick_broadcast_csd, cpu);

                if (atomic_inc_return(count) == 1)
                        smp_call_function_single_async(cpu, csd);
        }
}

static void tick_broadcast_callee(void *info)
{
        int cpu = smp_processor_id();
        tick_receive_broadcast();
        atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
        struct call_single_data *csd;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                csd = &per_cpu(tick_broadcast_csd, cpu);
                csd->func = tick_broadcast_callee;
        }

        return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */