MIPS: OCTEON: support disabling HOTPLUG_CPU run-time
[deliverable/linux.git] / arch/mips/cavium-octeon/smp.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/sched.h>
#include <linux/module.h>

#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>

#include <asm/octeon/octeon.h>

#include "octeon_boot.h"

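/*
 * Boot handshake state shared with the secondary-core start-up code
 * (which lives outside this file): octeon_processor_boot names the core
 * being released (0xff when none), while octeon_processor_sp and
 * octeon_processor_gp carry the stack pointer and thread_info pointer it
 * should adopt.  The starting core is expected to clear
 * octeon_processor_sp once it has consumed these values; see the wait
 * loop in octeon_boot_secondary() below.
 */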
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;

#ifdef CONFIG_HOTPLUG_CPU
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif

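/*
 * IPI receive path: each core has a CIU mailbox register.  A sender sets
 * action bits through CVMX_CIU_MBOX_SETX(core), which raises this
 * interrupt on the target core.  The handler reads the pending bits from
 * CVMX_CIU_MBOX_CLRX, writes them back to acknowledge the interrupt, and
 * then dispatches on the SMP_* action flags.
 */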
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	const int coreid = cvmx_get_core_num();
	uint64_t action;

	/* Load the mailbox register to figure out what we're supposed to do */
	action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);

	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();
	if (action & SMP_RESCHEDULE_YOURSELF)
		scheduler_ipi();

	/* Check if we've been told to flush the icache */
	if (action & SMP_ICACHE_FLUSH)
		asm volatile ("synci 0($0)\n");
	return IRQ_HANDLED;
}

/**
 * Send the requested IPI action(s) to a single CPU by setting the
 * corresponding bits in the target core's CIU mailbox register.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	int coreid = cpu_logical_map(cpu);
	/*
	pr_info("SMP: Mailbox send cpu=%d, coreid=%d, action=%u\n", cpu,
		coreid, action);
	*/
	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}

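/*
 * Send the requested IPI action(s) to every CPU in the mask, one mailbox
 * write per target core.
 */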
static inline void octeon_send_ipi_mask(const struct cpumask *mask,
					unsigned int action)
{
	unsigned int i;

	for_each_cpu_mask(i, *mask)
		octeon_send_ipi_single(i, action);
}

/**
 * Prepare for CPU hotplug: verify the bootloader's linux_app_boot_info
 * signature and record the bootloader entry address used to restart
 * offlined cores.
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	if (!setup_max_cpus)
		return;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE)
		panic("The bootloader version on this board is incorrect.");

	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}

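/*
 * Detect the available CPUs and populate cpu_possible_mask and
 * cpu_present_mask: the boot core becomes CPU 0, the other cores in the
 * boot coremask get the next CPU numbers, and (when CONFIG_HOTPLUG_CPU is
 * enabled and setup_max_cpus is non-zero) the remaining on-chip cores are
 * marked possible so they can be brought up later.
 */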
static void octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	int core_mask = octeon_get_boot_coremask();
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && (core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip.  We
	 * will assign CPU numbers for possible cores as well.  Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; setup_max_cpus && id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}

/**
 * Firmware CPU startup hook
 */
static void octeon_boot_secondary(int cpu, struct task_struct *idle)
{
	int count;

	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
		cpu_logical_map(cpu));

	octeon_processor_sp = __KSTK_TOS(idle);
	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
	octeon_processor_boot = cpu_logical_map(cpu);
	mb();

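	/*
	 * The secondary start-up code watches octeon_processor_boot for its
	 * core id, loads the SP/GP published above, and is expected to clear
	 * octeon_processor_sp; wait briefly for that acknowledgement.
	 */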
	count = 10000;
	while (octeon_processor_sp && count) {
		/* Waiting for processor to get the SP and GP */
		udelay(1);
		count--;
	}
	if (count == 0)
		pr_err("Secondary boot timeout\n");
}

/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
static void octeon_init_secondary(void)
{
	unsigned int sr;

	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

	octeon_check_cpu_bist();
	octeon_init_cvmcount();

	octeon_irq_setup_secondary();
}

/**
 * Callout to firmware before smp_init
 */
void octeon_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * Only the low order mailbox bits are used for IPIs, leave
	 * the other bits alone.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff);
	if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI",
			mailbox_interrupt)) {
		panic("Cannot request_irq(OCTEON_IRQ_MBOX0)");
	}
}

/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
	octeon_user_io_init();

	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

/* State of each CPU. */
DEFINE_PER_CPU(int, cpu_state);

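/*
 * Take the current CPU out of service: mark it offline, drop it from the
 * callin map, re-route its interrupts via octeon_fixup_irqs(), and flush
 * caches and TLB before the core is stopped.
 */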
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	set_cpu_online(cpu, false);
	cpu_clear(cpu, cpu_callin_map);
	local_irq_disable();
	octeon_fixup_irqs();
	local_irq_enable();

	flush_cache_all();
	local_flush_tlb_all();

	return 0;
}

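/*
 * Called on a surviving CPU once the dying CPU has reached CPU_DEAD in
 * play_dead(): return the core to the bootloader's available coremask
 * (in the named bootmem block if present, otherwise in the LABI
 * structure) and reset it through CIU_PP_RST.
 */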
static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * This is a somewhat convoluted way of getting/setting the
	 * available cores mask, copied from the bootloader.
	 */

	mask = 1 << coreid;
	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {		/* alternative, already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
				AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
	mb();
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}

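/*
 * Idle loop for an offlined CPU, run on the dying CPU itself: mark its
 * state CPU_DEAD and spin until octeon_cpu_die() on another CPU resets
 * the core through CIU_PP_RST.
 */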
void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();

	while (1)	/* core will be reset here */
		;
}

extern void kernel_entry(unsigned long arg1, ...);

static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
}

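/*
 * Arrange for an offlined core to re-enter the kernel: if the core is not
 * in the bootloader's available coremask it is reset first (assumed to be
 * held by the simple executive), then start_after_reset is installed in
 * the bootloader's boot vector for that core and the core is kicked with
 * an NMI so it restarts through kernel_entry().
 */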
static int octeon_update_boot_vector(unsigned int cpu)
{

	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {		/* alternative, already initialized */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* core not available, assume it was caught by the simple executive */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	mb();

	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}

static int octeon_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		octeon_update_boot_vector(cpu);
		break;
	case CPU_ONLINE:
		pr_info("Cpu %d online\n", cpu);
		break;
	case CPU_DEAD:
		break;
	}

	return NOTIFY_OK;
}

static int register_cavium_notifier(void)
{
	hotcpu_notifier(octeon_cpu_callback, 0);
	return 0;
}
late_initcall(register_cavium_notifier);

#endif	/* CONFIG_HOTPLUG_CPU */

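/* Hook the OCTEON-specific callbacks into the generic MIPS SMP framework. */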
struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};