Commit | Line | Data |
---|---|---|
5b3b1688 DD |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
edfcbb8c | 6 | * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks |
5b3b1688 | 7 | */ |
773cb77d | 8 | #include <linux/cpu.h> |
5b3b1688 DD |
9 | #include <linux/delay.h> |
10 | #include <linux/smp.h> | |
11 | #include <linux/interrupt.h> | |
12 | #include <linux/kernel_stat.h> | |
13 | #include <linux/sched.h> | |
14 | #include <linux/module.h> | |
15 | ||
16 | #include <asm/mmu_context.h> | |
5b3b1688 | 17 | #include <asm/time.h> |
b81947c6 | 18 | #include <asm/setup.h> |
5b3b1688 DD |
19 | |
20 | #include <asm/octeon/octeon.h> | |
21 | ||
773cb77d RB |
22 | #include "octeon_boot.h" |
23 | ||
5b3b1688 DD |
/*
 * Handshake variables for secondary-core bringup: the boot CPU publishes
 * the target core id, stack pointer and global pointer here, and waits
 * for the SP to be consumed (see octeon_boot_secondary/play_dead).
 * NOTE(review): presumably read by the low-level secondary-boot code
 * outside this file — confirm against octeon_boot.h / head.S.
 */
volatile unsigned long octeon_processor_boot = 0xff;
volatile unsigned long octeon_processor_sp;
volatile unsigned long octeon_processor_gp;
27 | ||
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Bootloader re-entry address, filled in from the LABI structure by
 * octeon_smp_hotplug_setup(); remains zero when the bootloader does not
 * support hotplug (checked in octeon_cpu_disable()).
 */
uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif
32 | ||
c6d2b22e DD |
/* Synchronize the local core's instruction cache via the SYNCI instruction. */
static void octeon_icache_flush(void)
{
	asm volatile ("synci 0($0)\n");
}
37 | ||
/*
 * Mailbox-bit to handler mapping: entry i services mailbox bit (1 << i).
 * The ordering must match the SMP_* constants; mailbox_interrupt() checks
 * this with BUILD_BUG_ON.  Unused slots stay NULL and are skipped.
 */
static void (*octeon_message_functions[8])(void) = {
	scheduler_ipi,				/* SMP_RESCHEDULE_YOURSELF */
	generic_smp_call_function_interrupt,	/* SMP_CALL_FUNCTION */
	octeon_icache_flush,			/* SMP_ICACHE_FLUSH */
};
43 | ||
5b3b1688 DD |
/*
 * Mailbox IPI handler: read the pending action bits for this core,
 * acknowledge them in the CIU mailbox CLR register, then dispatch each
 * set bit to its handler in octeon_message_functions[].
 */
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
	u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
	u64 action;
	int i;

	/*
	 * Make sure the function array initialization remains
	 * correct.
	 */
	BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
	BUILD_BUG_ON(SMP_CALL_FUNCTION != (1 << 1));
	BUILD_BUG_ON(SMP_ICACHE_FLUSH != (1 << 2));

	/*
	 * Load the mailbox register to figure out what we're supposed
	 * to do.
	 */
	action = cvmx_read_csr(mbox_clrx);

	/* CN68XX exposes 8 mailbox bits per core; other models 16. */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		action &= 0xff;
	else
		action &= 0xffff;

	/* Clear the mailbox to clear the interrupt */
	cvmx_write_csr(mbox_clrx, action);

	/* Walk the set bits low to high; stop once no bits remain. */
	for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
		if (action & 1) {
			void (*fn)(void) = octeon_message_functions[i];

			if (fn)
				fn();
		}
		action >>= 1;
		i++;
	}
	return IRQ_HANDLED;
}
84 | ||
/**
 * octeon_send_ipi_single - Send an inter-processor interrupt to one CPU.
 * @cpu:    logical CPU number of the target.
 * @action: bitmask of SMP_* actions the target should perform.
 *
 * Writes the action bits into the target core's CIU mailbox SET
 * register, which raises the mailbox interrupt on that core (serviced
 * by mailbox_interrupt()).
 *
 * The previous header comment referred to a nonexistent "call_data"
 * mechanism and the body carried commented-out debug code; both removed.
 */
void octeon_send_ipi_single(int cpu, unsigned int action)
{
	int coreid = cpu_logical_map(cpu);

	cvmx_write_csr(CVMX_CIU_MBOX_SETX(coreid), action);
}
99 | ||
067f3290 DD |
100 | static inline void octeon_send_ipi_mask(const struct cpumask *mask, |
101 | unsigned int action) | |
5b3b1688 DD |
102 | { |
103 | unsigned int i; | |
104 | ||
8dd92891 | 105 | for_each_cpu(i, mask) |
5b3b1688 DD |
106 | octeon_send_ipi_single(i, action); |
107 | } | |
108 | ||
/**
 * Detect available CPUs, populate cpu_possible_mask
 *
 * Checks whether the bootloader supports CPU hotplug (via the LABI
 * signature) and records its re-entry address for restarting cores.
 */
static void octeon_smp_hotplug_setup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct linux_app_boot_info *labi;

	/* setup_max_cpus == 0 means SMP was disabled on the command line. */
	if (!setup_max_cpus)
		return;

	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
	if (labi->labi_signature != LABI_SIGNATURE) {
		pr_info("The bootloader on this board does not support HOTPLUG_CPU.");
		return;
	}

	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
#endif
}
129 | ||
/*
 * Build the possible/present CPU masks and the logical<->physical core
 * maps.  The boot core becomes CPU 0; other cores in the running
 * coremask get the next numbers; with hotplug, remaining physical cores
 * are added as possible-but-not-present.
 */
static void __init octeon_smp_setup(void)
{
	const int coreid = cvmx_get_core_num();
	int cpus;
	int id;
	struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();

#ifdef CONFIG_HOTPLUG_CPU
	int core_mask = octeon_get_boot_coremask();
	unsigned int num_cores = cvmx_octeon_num_cores();
#endif

	/* The present CPUs are initially just the boot cpu (CPU 0). */
	for (id = 0; id < NR_CPUS; id++) {
		set_cpu_possible(id, id == 0);
		set_cpu_present(id, id == 0);
	}

	__cpu_number_map[coreid] = 0;
	__cpu_logical_map[0] = coreid;

	/* The present CPUs get the lowest CPU numbers. */
	cpus = 1;
	for (id = 0; id < NR_CPUS; id++) {
		if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
			set_cpu_possible(cpus, true);
			set_cpu_present(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * The possible CPUs are all those present on the chip.  We
	 * will assign CPU numbers for possible cores as well.  Cores
	 * are always consecutively numbered from 0.
	 */
	for (id = 0; setup_max_cpus && octeon_bootloader_entry_addr &&
		     id < num_cores && id < NR_CPUS; id++) {
		if (!(core_mask & (1 << id))) {
			set_cpu_possible(cpus, true);
			__cpu_number_map[id] = cpus;
			__cpu_logical_map[cpus] = id;
			cpus++;
		}
	}
#endif

	octeon_smp_hotplug_setup();
}
182 | ||
/**
 * Firmware CPU startup hook
 *
 * Publish the new CPU's stack pointer, global pointer and core id to
 * the handshake variables, then wait up to ~10 ms for the secondary to
 * pick them up (it signals this by clearing octeon_processor_sp).
 */
static void octeon_boot_secondary(int cpu, struct task_struct *idle)
{
	int count;

	pr_info("SMP: Booting CPU%02d (CoreId %2d)...\n", cpu,
		cpu_logical_map(cpu));

	octeon_processor_sp = __KSTK_TOS(idle);
	octeon_processor_gp = (unsigned long)(task_thread_info(idle));
	/* Publish the core id last; it is what releases the secondary. */
	octeon_processor_boot = cpu_logical_map(cpu);
	mb();

	count = 10000;
	while (octeon_processor_sp && count) {
		/* Waiting for processor to get the SP and GP */
		udelay(1);
		count--;
	}
	if (count == 0)
		pr_err("Secondary boot timeout\n");
}
208 | ||
/**
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
static void octeon_init_secondary(void)
{
	unsigned int sr;

	/*
	 * Point this CPU's exception base at the kernel's ebase; BEV is
	 * set around the write so exceptions use the ROM vector while
	 * EBASE changes.
	 */
	sr = set_c0_status(ST0_BEV);
	write_c0_ebase((u32)ebase);
	write_c0_status(sr);

	octeon_check_cpu_bist();
	octeon_init_cvmcount();

	octeon_irq_setup_secondary();
}
226 | ||
227 | /** | |
228 | * Callout to firmware before smp_init | |
229 | * | |
230 | */ | |
0e8c1a32 | 231 | static void __init octeon_prepare_cpus(unsigned int max_cpus) |
5b3b1688 | 232 | { |
e650ce0f DD |
233 | /* |
234 | * Only the low order mailbox bits are used for IPIs, leave | |
235 | * the other bits alone. | |
236 | */ | |
237 | cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), 0xffff); | |
e63fb7a9 VS |
238 | if (request_irq(OCTEON_IRQ_MBOX0, mailbox_interrupt, |
239 | IRQF_PERCPU | IRQF_NO_THREAD, "SMP-IPI", | |
240 | mailbox_interrupt)) { | |
ab75dc02 | 241 | panic("Cannot request_irq(OCTEON_IRQ_MBOX0)"); |
5b3b1688 | 242 | } |
5b3b1688 DD |
243 | } |
244 | ||
/**
 * Last chance for the board code to finish SMP initialization before
 * the CPU is "online".
 */
static void octeon_smp_finish(void)
{
	octeon_user_io_init();

	/* to generate the first CPU timer interrupt */
	write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
	local_irq_enable();
}
257 | ||
773cb77d RB |
258 | #ifdef CONFIG_HOTPLUG_CPU |
259 | ||
/* State of each CPU; set to CPU_DEAD by play_dead(), read by octeon_cpu_die(). */
DEFINE_PER_CPU(int, cpu_state);
262 | ||
773cb77d RB |
/*
 * Take the current CPU out of service.  The boot CPU (0) can never be
 * removed, and hotplug requires bootloader support (a recorded re-entry
 * address).  Returns 0 on success or a -errno.
 */
static int octeon_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	/* No bootloader entry address: core could never be restarted. */
	if (!octeon_bootloader_entry_addr)
		return -ENOTSUPP;

	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, &cpu_callin_map);
	/* Re-route this CPU's interrupts before it disappears. */
	octeon_fixup_irqs();

	__flush_cache_all();
	local_flush_tlb_all();

	return 0;
}
282 | ||
/*
 * Called on a surviving CPU: wait for the dying CPU to reach CPU_DEAD,
 * return its core to the bootloader's available-core mask, then pulse
 * its reset line.
 */
static void octeon_cpu_die(unsigned int cpu)
{
	int coreid = cpu_logical_map(cpu);
	uint32_t mask, new_mask;
	const struct cvmx_bootmem_named_block_desc *block_desc;

	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	/*
	 * This is a bit complicated strategy of getting/setting available
	 * cores mask, copied from bootloader
	 */

	mask = 1 << coreid;
	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		/* No named block: update the mask in the LABI structure. */
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		labi->avail_coremask |= mask;
		new_mask = labi->avail_coremask;
	} else {		/* alternative, already initialized */
		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
							       AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
		*p |= mask;
		new_mask = *p;
	}

	pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
	/* Ensure the coremask update is visible before resetting the core. */
	mb();
	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}
320 | ||
/*
 * Last code executed by an offlined CPU: mark itself CPU_DEAD and spin
 * until octeon_cpu_die() resets this core.
 */
void play_dead(void)
{
	int cpu = cpu_number_map(cvmx_get_core_num());

	idle_task_exit();
	/* Reset the boot handshake so a future re-plug starts cleanly. */
	octeon_processor_boot = 0xff;
	per_cpu(cpu_state, cpu) = CPU_DEAD;

	mb();

	while (1)	/* core will be reset here */
		;
}
334 | ||
335 | extern void kernel_entry(unsigned long arg1, ...); | |
336 | ||
/* Entry point for a hotplugged core: jump straight back into the kernel. */
static void start_after_reset(void)
{
	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
}
341 | ||
/*
 * Set up the bootloader boot vector so the core for @cpu enters the
 * kernel via start_after_reset(), reserve the core in the available
 * coremask, and wake it with an NMI if the bootloader is holding it.
 * Returns 0.
 */
static int octeon_update_boot_vector(unsigned int cpu)
{

	int coreid = cpu_logical_map(cpu);
	uint32_t avail_coremask;
	const struct cvmx_bootmem_named_block_desc *block_desc;
	struct boot_init_vector *boot_vect =
		(struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);

	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);

	if (!block_desc) {
		/* No named block: read/claim the mask in the LABI structure. */
		struct linux_app_boot_info *labi;

		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);

		avail_coremask = labi->avail_coremask;
		labi->avail_coremask &= ~(1 << coreid);
	} else {		/* alternative, already initialized */
		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
	}

	if (!(avail_coremask & (1 << coreid))) {
		/* core not available, assume, that caught by simple-executive */
		cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
		cvmx_write_csr(CVMX_CIU_PP_RST, 0);
	}

	boot_vect[coreid].app_start_func_addr =
		(uint32_t) (unsigned long) start_after_reset;
	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;

	/* Boot vector must be visible before the core is kicked. */
	mb();

	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);

	return 0;
}
381 | ||
/*
 * CPU hotplug notifier: prepare the bootloader boot vector before a CPU
 * is brought up; log when it comes online.
 */
static int octeon_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* Mask off the FROZEN flag so suspend/resume paths are handled too. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		octeon_update_boot_vector(cpu);
		break;
	case CPU_ONLINE:
		pr_info("Cpu %d online\n", cpu);
		break;
	case CPU_DEAD:
		break;
	}

	return NOTIFY_OK;
}
400 | ||
/* Register the hotplug notifier once boot is far enough along. */
static int register_cavium_notifier(void)
{
	hotcpu_notifier(octeon_cpu_callback, 0);
	return 0;
}
late_initcall(register_cavium_notifier);
407 | ||
70342287 | 408 | #endif /* CONFIG_HOTPLUG_CPU */ |
773cb77d | 409 | |
5b3b1688 DD |
/* SMP operations for CIU mailbox based Octeon models (see octeon_setup_smp). */
struct plat_smp_ops octeon_smp_ops = {
	.send_ipi_single	= octeon_send_ipi_single,
	.send_ipi_mask		= octeon_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};
c6d2b22e DD |
423 | |
/* CIU3 mailbox 0 handler: reschedule IPI. */
static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}
429 | ||
/* CIU3 mailbox 1 handler: smp_call_function IPI. */
static irqreturn_t octeon_78xx_call_function_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();
	return IRQ_HANDLED;
}
435 | ||
/* CIU3 mailbox 2 handler: icache flush IPI. */
static irqreturn_t octeon_78xx_icache_flush_interrupt(int irq, void *dev_id)
{
	octeon_icache_flush();
	return IRQ_HANDLED;
}
441 | ||
/*
 * Callout to firmware before smp_init
 *
 * On CIU3-based parts each IPI type has its own mailbox IRQ; request
 * one handler per mailbox.  The handler pointer doubles as dev_id.
 */
static void octeon_78xx_prepare_cpus(unsigned int max_cpus)
{
	if (request_irq(OCTEON_IRQ_MBOX0 + 0,
			octeon_78xx_reched_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
			octeon_78xx_reched_interrupt)) {
		panic("Cannot request_irq for SchedulerIPI");
	}
	if (request_irq(OCTEON_IRQ_MBOX0 + 1,
			octeon_78xx_call_function_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
			octeon_78xx_call_function_interrupt)) {
		panic("Cannot request_irq for SMP-Call");
	}
	if (request_irq(OCTEON_IRQ_MBOX0 + 2,
			octeon_78xx_icache_flush_interrupt,
			IRQF_PERCPU | IRQF_NO_THREAD, "ICache-Flush",
			octeon_78xx_icache_flush_interrupt)) {
		panic("Cannot request_irq for ICache-Flush");
	}
}
466 | ||
/*
 * Send an IPI to one CPU on a CIU3-based chip: each SMP_* action bit
 * maps to its own CIU3 mailbox number (0..7).
 */
static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
{
	int mbox;

	for (mbox = 0; mbox < 8; mbox++) {
		if (action & (1u << mbox))
			octeon_ciu3_mbox_send(cpu, mbox);
	}
}
477 | ||
/* Deliver @action to every CPU in @mask, one IPI at a time. */
static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
				      unsigned int action)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask)
		octeon_78xx_send_ipi_single(cpu, action);
}
486 | ||
/* SMP operations for CIU3-based (78xx-class) Octeon models. */
static struct plat_smp_ops octeon_78xx_smp_ops = {
	.send_ipi_single	= octeon_78xx_send_ipi_single,
	.send_ipi_mask		= octeon_78xx_send_ipi_mask,
	.init_secondary		= octeon_init_secondary,
	.smp_finish		= octeon_smp_finish,
	.boot_secondary		= octeon_boot_secondary,
	.smp_setup		= octeon_smp_setup,
	.prepare_cpus		= octeon_78xx_prepare_cpus,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= octeon_cpu_disable,
	.cpu_die		= octeon_cpu_die,
#endif
};
500 | ||
501 | void __init octeon_setup_smp(void) | |
502 | { | |
503 | struct plat_smp_ops *ops; | |
504 | ||
505 | if (octeon_has_feature(OCTEON_FEATURE_CIU3)) | |
506 | ops = &octeon_78xx_smp_ops; | |
507 | else | |
508 | ops = &octeon_smp_ops; | |
509 | ||
510 | register_smp_ops(ops); | |
511 | } |