/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * RajeshwarR: Dec 11, 2007
 *   -- Added support for Inter Processor Interrupts
 *
 * Vineetg: Nov 1st, 2007
 *   -- Initial Write (Borrowed heavily from ARM)
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>

#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
#endif

struct plat_smp_ops __weak plat_smp_ops;

/* XXX: per cpu? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;

/* Called from start_kernel */
void __init smp_prepare_boot_cpu(void)
{
}

/*
 * Called from setup_arch() before calling setup_processor()
 *
 * - Initialise the CPU possible map early - this describes the CPUs
 *   which may be present or become present in the system.
 * - Call the early SMP init hook. This can initialize a specific multi-core
 *   IP which is, say, common to several platforms (hence not part of the
 *   platform specific init_early() hook)
 */
void __init smp_init_cpus(void)
{
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}

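/*
 * Illustrative sketch (not part of this file): platform / interconnect-IP
 * code typically overrides the __weak plat_smp_ops above by filling in its
 * members from its own init code. The member names below are the ones used
 * in this file; the "my_" function names are hypothetical placeholders.
 *
 *	void __init my_connect_ip_init_smp(void)
 *	{
 *		plat_smp_ops.info	    = "My interconnect IP";
 *		plat_smp_ops.init_early_smp = my_init_early_smp;
 *		plat_smp_ops.init_per_cpu   = my_init_per_cpu;
 *		plat_smp_ops.cpu_kick	    = my_cpu_kick;
 *		plat_smp_ops.ipi_send	    = my_ipi_send;
 *		plat_smp_ops.ipi_clear	    = my_ipi_clear;
 *	}
 */
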
/* called from init() => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{

}

/*
 * Default smp boot helper for the Run-on-reset case, where all cores start
 * off together. Non-masters need to wait for the Master to start running.
 * This is implemented using a flag in memory, which Non-masters spin-wait on.
 * The Master sets it to the cpu-id of the core to "ungate" it.
 */
static volatile int wake_flag;

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);
	wake_flag = cpu;
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	while (wake_flag != cpu)
		;

	wake_flag = 0;
	__asm__ __volatile__("j @first_lines_of_secondary	\n");
}

const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}

/*
 * The very first "C" code executed by a secondary
 * Called from the asm stub in head.S
 * "current"/R25 already set up by low level boot code
 */
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, Caches, Vector Table, Interrupts etc */
	setup_processor();

	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* Some SMP H/w setup - for each cpu */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	preempt_disable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Called from kernel_init() -> smp_init() - for each CPU
 *
 * At this point the Secondary Processor is "HALT"ed:
 * - It booted, but was halted in head.S
 * - It was configured to halt-on-reset
 * So it needs to be woken up.
 *
 * The essential requirements are where to run from (PC) and the stack (SP)
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				      (unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* wait for 1 sec after kicking the secondary */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/*
 * In arches with an IRQ for each msg type (above), the receiver can use the
 * IRQ-id to figure out what msg was sent. For those which don't (ARC has a
 * dedicated IPI IRQ), the msg-type needs to be conveyed via per-cpu data
 */

static DEFINE_PER_CPU(unsigned long, ipi_data);

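/*
 * Worked example (illustrative only): if IPI_RESCHEDULE (bit 1) is raised and
 * IPI_CALL_FUNC (bit 2) arrives before the receiver has run, the receiver's
 * ipi_data word holds 0x6 (binary 110). A single hardware IPI then services
 * both messages: do_IPI() below xchg()es the word back to 0 and walks the set
 * bits, which is why a sender can skip the platform kick if bits were already
 * pending (see ipi_send_msg_one() below).
 */
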
static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);

	/*
	 * Atomically write the new msg bit (in case others are writing too),
	 * and read back the old value
	 */
	do {
		new = old = ACCESS_ONCE(*ipi_data_ptr);
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

	/*
	 * Call the platform specific IPI kick function, but avoid it if
	 * possible: only do so if there's no msg pending from other concurrent
	 * sender(s). Otherwise, the receiver will see this msg as well when it
	 * takes the IPI corresponding to that msg. This holds even if the
	 * receiver is already in the IPI handler, because !@old means it has
	 * not yet dequeued the msg(s), so the @new msg can be a free-loader
	 */
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

void smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;

	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}

/*
 * arch-common ISR to handle inter-processor interrupts
 * Has hooks for platform specific IPI
 */
irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}

/*
 * API called by platform code to hookup the arch-common ISR to their IPI IRQ
 *
 * Note: If the IPI is provided by the platform (vs. say ARC MCIP), its intc
 * setup/map function needs to call irq_set_percpu_devid() for the IPI IRQ,
 * otherwise request_percpu_irq() below will fail
 */
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, int irq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);

	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %d\n", irq);
	}

	enable_percpu_irq(irq, 0);

	return 0;
}
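
/*
 * Illustrative sketch (not part of this file) of the platform-side hookup the
 * note above smp_ipi_irq_setup() refers to. Names with a "my_" prefix and
 * MY_IPI_HWIRQ are hypothetical placeholders; the key point is the
 * irq_set_percpu_devid() call, without which request_percpu_irq() above
 * would fail:
 *
 *	static int my_intc_irq_map(struct irq_domain *d, unsigned int irq,
 *				   irq_hw_number_t hw)
 *	{
 *		if (hw == MY_IPI_HWIRQ) {
 *			irq_set_percpu_devid(irq);
 *			irq_set_chip_and_handler(irq, &my_intc_chip,
 *						 handle_percpu_devid_irq);
 *		} else {
 *			irq_set_chip_and_handler(irq, &my_intc_chip,
 *						 handle_level_irq);
 *		}
 *		return 0;
 *	}
 *
 * Each cpu (boot and secondaries alike) would then call
 * smp_ipi_irq_setup(cpu, virq) with the virq mapped for MY_IPI_HWIRQ, e.g.
 * from a plat_smp_ops.init_per_cpu hook.
 */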