x86: unify cpu_callin_mask/cpu_callout_mask/cpu_initialized_mask/cpu_sibling_setup_mask
arch/x86/xen/smp.c

/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/page.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);
static DEFINE_PER_CPU(int, callfuncsingle_irq);
static DEFINE_PER_CPU(int, debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule callback.  Nothing to do, all the work is done
 * automatically when we return from the interrupt.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);

	return IRQ_HANDLED;
}

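/*
 * First code run by a freshly started secondary vcpu: initialize the
 * cpu, enable the sysenter/syscall entry points, record cpu data and
 * the (trivial) sibling map, start clockevents, and mark the cpu
 * online before enabling interrupts.
 */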
static __cpuinit void cpu_bringup(void)
{
	int cpu = smp_processor_id();

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	cpu_set(cpu, cpu_online_map);
	percpu_write(cpu_state, CPU_ONLINE);
	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}

static __cpuinit void cpu_bringup_and_idle(void)
{
	cpu_bringup();
	cpu_idle();
}

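/*
 * Bind this cpu's IPI event channels (reschedule, call-function and
 * call-function-single) plus the VIRQ_DEBUG virq to their handlers.
 * On failure, unbind whatever was already set up and return the error.
 */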
static int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	const char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(resched_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfunc_irq, cpu) = rc;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(debug_irq, cpu) = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(callfuncsingle_irq, cpu) = rc;

	return 0;

 fail:
	if (per_cpu(resched_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	if (per_cpu(callfunc_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	if (per_cpu(debug_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	if (per_cpu(callfuncsingle_irq, cpu) >= 0)
		unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);

	return rc;
}

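/*
 * Probe for vcpus by asking the hypervisor: VCPUOP_is_up returns a
 * non-negative value for every vcpu id that exists, whether or not it
 * is currently running, so mark each such id possible.
 */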
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			cpu_set(i, cpu_possible_map);
		}
	}
}

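/*
 * The native code has already switched cpu 0 onto its real per-cpu
 * GDT, so the initial GDT page can be made writable again for reuse,
 * and vcpu_info placement can be set up.
 */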
static void __init xen_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/* We've switched to the "real" per-cpu gdt, so make sure the
	   old memory can be recycled */
	make_lowmem_page_readwrite(xen_initial_gdt);

	xen_setup_vcpu_info_placement();
}

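/*
 * Finish SMP preparation on the boot cpu: record its cpu data and
 * sibling map, bind its IPI channels, allocate the map of initialized
 * vcpus, trim the possible map down to max_cpus, and fork an idle
 * task for every other possible cpu.
 */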
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;

	xen_init_lock_cpu(0);

	smp_store_cpu_info(0);
	cpu_data(0).x86_max_cores = 1;
	set_cpu_sibling_map(0);

	if (xen_smp_intr_init(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		cpu_clear(cpu, cpu_possible_map);
	}

	for_each_possible_cpu (cpu) {
		struct task_struct *idle;

		if (cpu == 0)
			continue;

		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);

		cpu_set(cpu, cpu_present_map);
	}
}

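/*
 * Construct the vcpu_guest_context that a new vcpu starts with:
 * segment selectors, trap table, a read-only GDT frame, the entry
 * point (cpu_bringup_and_idle), kernel stack pointer and cr3.  The
 * context is registered with Xen via VCPUOP_initialise; each cpu is
 * only initialized once.
 */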
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs = __KERNEL_CS;
	ctxt->failsafe_callback_cs = __KERNEL_CS;
#endif
	ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}

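/*
 * Bring a secondary cpu online.  Points current_task at the idle
 * task, prepares the per-cpu stack, timer and spinlock state,
 * installs the initial context, and then starts the vcpu with
 * VCPUOP_up, yielding to the hypervisor until the new cpu reports
 * CPU_ONLINE.
 */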
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
		barrier();
	}

	return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
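/*
 * Take the calling cpu out of service.  cpu 0 cannot be unplugged;
 * for any other cpu, run the common teardown and switch to the
 * swapper page tables so no task mm stays pinned.
 */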
static int xen_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	if (cpu == 0)
		return -EBUSY;

	cpu_disable_common();

	load_cr3(swapper_pg_dir);
	return 0;
}

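/*
 * Called on a surviving cpu: wait for the dying vcpu to actually go
 * down, then release its IPI channels, spinlock state and timer, and
 * drop back to the UP alternatives if only one cpu remains online.
 */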
static void xen_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);

	if (num_online_cpus() == 1)
		alternatives_smp_switch(0);
}

static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
	return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
	BUG();
}

static void xen_play_dead(void)
{
	BUG();
}

#endif
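
/*
 * Executed on each cpu by xen_smp_send_stop(): drop any pinned page
 * tables and take the vcpu down with VCPUOP_down.
 */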
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}

static void xen_smp_send_stop(void)
{
	smp_call_function(stop_self, NULL, 0);
}

static void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

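/*
 * "IPIs" under Xen are event channel notifications, so sending to a
 * mask is just a loop notifying each online cpu in the mask in turn.
 */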
static void xen_send_IPI_mask(const struct cpumask *mask,
			      enum ipi_vector vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
			break;
		}
	}
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initdata = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.smp_cpus_done = xen_smp_cpus_done,

	.cpu_up = xen_cpu_up,
	.cpu_die = xen_cpu_die,
	.cpu_disable = xen_cpu_disable,
	.play_dead = xen_play_dead,

	.smp_send_stop = xen_smp_send_stop,
	.smp_send_reschedule = xen_smp_send_reschedule,

	.send_call_func_ipi = xen_smp_send_call_function_ipi,
	.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
	smp_ops = xen_smp_ops;
	xen_fill_possible_map();
	xen_init_spinlocks();
}