MIPS: OCTEON: support disabling HOTPLUG_CPU run-time
arch/mips/cavium-octeon/smp.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

#include <asm/octeon/octeon.h>

#include "octeon_boot.h"
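
/*
 * Hand-off area for secondary core bring-up: octeon_boot_secondary() below
 * publishes the target core number together with the idle thread's stack
 * and gp pointers through these variables; the waiting core picks them up
 * and clears octeon_processor_sp to acknowledge.
 */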
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;

#ifdef CONFIG_HOTPLUG_CPU
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif

static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	const int coreid = cvmx_get_core_num();
	uint64_t action;

	/* Load the mailbox register to figure out what we're supposed to do */
	action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();

	/* Check if we've been told to flush the icache */
	if (action & SMP_ICACHE_FLUSH)
		asm volatile ("synci 0($0)\n");
	return IRQ_HANDLED;
}

/**
 * Send an inter-processor interrupt (IPI) to a single CPU by setting the
 * requested action bits in the target core's CIU mailbox register.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	int coreid = cpu_logical_map(cpu);
	/*
	pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
		coreid, action);
	*/
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}
static inline void octeon_send_ipi_mask(const struct cpumask *mask,
					unsigned int action)
{
	unsigned int i;

	for_each_cpu_mask(i, *mask)
		octeon_send_ipi_single(i, action);
}

/**
 * Detect available CPUs, populate cpu_possible_mask
 */
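/*
 * Note: when SMP is disabled on the kernel command line (setup_max_cpus == 0,
 * e.g. "nosmp"), the bootloader's linux_app_boot_info area is left untouched
 * below and CPU hotplug is effectively unavailable.
 */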
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	if (!setup_max_cpus)
		return;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE)
		panic("The bootloader version on this board is incorrect.");

	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}

static void octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	int core_mask = octeon_get_boot_coremask();
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && (core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip.  We
	 * will assign CPU numbers for possible cores as well.  Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; setup_max_cpus && id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}

/**
 * Firmware CPU startup hook
 */
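/*
 * Hand-off sequence, as implemented below: publish the idle thread's stack
 * and gp pointers, then write the target core number to
 * octeon_processor_boot; the waiting core latches the values and clears
 * octeon_processor_sp, which is what the timeout loop polls for.
 */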
static void octeon_boot_secondary(int cpu, struct task_struct *idle)
{
	int count;

	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
		cpu_logical_map(cpu));

	octeon_processor_sp = __KSTK_TOS(idle);
	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
	octeon_processor_boot = cpu_logical_map(cpu);
	mb();

	count = 10000;
	while (octeon_processor_sp && count) {
		/* Waiting for processor to get the SP and GP */
		udelay(1);
		count--;
	}
	if (count == 0)
		pr_err("Secondary boot timeout\n");
}

/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
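/*
 * Runs on each secondary as it starts: point the core's exception base at
 * the kernel's vectors (Status.BEV is set around the EBASE write, as the
 * architecture requires), check BIST results, initialize the cvmcount time
 * base and set up the per-CPU interrupt handling.
 */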
static void octeon_init_secondary(void)
{
	unsigned int sr;

	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

	octeon_check_cpu_bist();
	octeon_init_cvmcount();

	octeon_irq_setup_secondary();
}

/**
 * Callout to firmware before smp_init
 */
void octeon_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * Only the low order mailbox bits are used for IPIs, leave
	 * the other bits alone.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
			mailbox_interrupt)) {
		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
	}
}

/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
	octeon_user_io_init();

	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state);

static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	set_cpu_online(cpu, false);
	cpu_clear(cpu, cpu_callin_map);
	local_irq_disable();
	octeon_fixup_irqs();
	local_irq_enable();

	flush_cache_all();
	local_flush_tlb_all();

	return 0;
}

static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * Getting and setting the available coremask is somewhat involved;
	 * the logic below is copied from the bootloader.
	 */
	mask = 1 << coreid;
	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {	/* alternative, already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
							       AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
	mb();
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}
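
/*
 * Runs on the CPU going offline: mark ourselves CPU_DEAD and spin until
 * octeon_cpu_die(), running on another CPU, resets this core through
 * CIU_PP_RST.
 */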
void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();

	while (1)	/* core will be reset here */
		;
}

extern void kernel_entry(unsigned long arg1, ...);

static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
}

static int octeon_update_boot_vector(unsigned int cpu)
{

	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {	/* alternative, already initialized */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* core not available, assume it is caught by the simple-executive */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	mb();

	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}
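
/*
 * Hotplug notifier: on CPU_UP_PREPARE the boot vector is re-armed so the
 * parked core is kicked (via NMI) out of the bootloader and re-enters the
 * kernel through start_after_reset().
 */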
static int octeon_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		octeon_update_boot_vector(cpu);
		break;
	case CPU_ONLINE:
		pr_info("Cpu %d online\n", cpu);
		break;
	case CPU_DEAD:
		break;
	}

	return NOTIFY_OK;
}

static int register_cavium_notifier(void)
{
	hotcpu_notifier(octeon_cpu_callback, 0);
	return 0;
}
late_initcall(register_cavium_notifier);

#endif	/* CONFIG_HOTPLUG_CPU */

struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single = octeon_send_ipi_single,
	.send_ipi_mask = octeon_send_ipi_mask,
	.init_secondary = octeon_init_secondary,
	.smp_finish = octeon_smp_finish,
	.boot_secondary = octeon_boot_secondary,
	.smp_setup = octeon_smp_setup,
	.prepare_cpus = octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = octeon_cpu_disable,
	.cpu_die = octeon_cpu_die,
#endif
};