/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

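/*
 * Each of the paravirt features below can be disabled individually from
 * the kernel command line, e.g. booting with "no-steal-acc" turns off
 * steal-time accounting while leaving the other PV features enabled.
 */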
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

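/*
 * Per-cpu state shared with the host: the host writes the reason for an
 * asynchronous page fault into apf_reason and accumulates stolen time in
 * steal_time.  The MSRs that point the host at these areas are programmed
 * in kvm_guest_cpu_init() and kvm_register_steal_time() below.
 */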
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

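/*
 * Tasks waiting on an async page fault are kept in a small hash table,
 * keyed by the token the host associated with the fault, so that the
 * later "page ready" notification can find and wake the right sleeper.
 */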
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

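/*
 * Sleep until the "page ready" event for @token arrives.  If the current
 * context cannot sleep (idle task, or preemption disabled), poll in halt
 * instead; apf_task_wake_one() then kicks this vcpu with a reschedule
 * IPI rather than a wait-queue wakeup.
 */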
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);

	rcu_irq_enter();

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			rcu_irq_exit();
			native_safe_halt();
			rcu_irq_enter();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}

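/*
 * Handle a "page ready" notification for @token; a token of ~0 means
 * "wake every sleeper on this cpu".  If the wakeup arrives before the
 * corresponding fault, leave a dummy entry behind for
 * kvm_async_pf_task_wait() to find.
 */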
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

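/*
 * Fetch the pending async PF reason from the per-cpu area shared with
 * the host and clear it, so the next fault starts from a clean slate.
 */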
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__get_cpu_var(apf_reason).enabled) {
		reason = __get_cpu_var(apf_reason).reason;
		__get_cpu_var(apf_reason).reason = 0;
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);

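/*
 * Replacement #PF handler: an ordinary page fault is passed straight to
 * do_page_fault(), while the two async PF reasons put the current task
 * to sleep or wake the earlier sleeper.  In the async cases CR2 carries
 * the host-assigned token rather than a fault address.
 */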
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}

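/*
 * Enable the per-cpu paravirt features the host advertises: point the
 * async PF and PV EOI MSRs at this cpu's shared areas and register the
 * steal-time region.  Called on each cpu as it comes up.
 */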
void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__get_cpu_var(apf_reason).enabled = 1;
		printk(KERN_INFO "KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__get_cpu_var(kvm_apic_eoi) = 0;
		pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__get_cpu_var(apf_reason).enabled)
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__get_cpu_var(apf_reason).enabled = 0;

	printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
	 * New kernel can re-enable when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

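/*
 * Read a cpu's steal time from the area shared with the host.  The host
 * bumps the version field around each update (leaving it odd while an
 * update is in flight), so retry until a stable, even version is seen.
 */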
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_notify,
};
#endif

static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}

void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif
}

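/*
 * Locate the base of KVM's CPUID leaves by scanning the hypervisor
 * range for the "KVMKVMKVM" signature.  The result is cached; a base of
 * 0 means no KVM hypervisor was found.
 */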
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (cpu_has_hypervisor)
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name = "KVM",
	.detect = kvm_detect,
	.x2apic_available = kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

enum kvm_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};

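/*
 * Optional contention statistics for the PV ticketlock slowpath.  With
 * CONFIG_KVM_DEBUG_FS enabled they appear under kvm-guest/spinlocks/ in
 * debugfs (typically mounted at /sys/kernel/debug); writing a non-zero
 * value to zero_stats arms a reset of all counters.
 */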
#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS 30

static struct kvm_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	u8 ret;
	u8 old;

	old = ACCESS_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one fellow resets the stat */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}

static inline u64 spin_time_start(void)
{
	return sched_clock();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index;

	index = ilog2(delta);
	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta;

	delta = sched_clock() - start;
	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}

static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

struct dentry *kvm_init_debugfs(void)
{
	d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
	if (!d_kvm_debug)
		printk(KERN_WARNING "Could not create 'kvm-guest' debugfs directory\n");

	return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
	struct dentry *d_kvm;

	d_kvm = kvm_init_debugfs();
	if (d_kvm == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else  /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_KVM_DEBUG_FS */

struct kvm_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);

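/*
 * Slowpath for a contended ticket lock: publish which lock and ticket
 * this cpu is waiting for, then halt until kvm_unlock_kick() wakes us.
 * On a spurious wakeup the generic ticketlock code simply re-enters
 * this slowpath.
 */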
__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	struct kvm_lock_waiting *w;
	int cpu;
	u64 start;
	unsigned long flags;

	if (in_nmi())
		return;

	w = &__get_cpu_var(klock_waiting);
	cpu = smp_processor_id();
	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);

	/*
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	add_stats(TAKEN_SLOW, 1);

	/*
	 * This uses set_bit, which is atomic but we should not rely on its
	 * reordering guarantees. So a barrier is needed after this call.
	 */
	cpumask_set_cpu(cpu, &waiting_cpus);

	barrier();

	/*
	 * Mark entry to slowpath before doing the pickup test to make
	 * sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/*
	 * Check again to make sure the lock didn't become free while
	 * we weren't looking.
	 */
	if (ACCESS_ONCE(lock->tickets.head) == want) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/*
	 * Halt until it's our turn and we are kicked.  Note that we do
	 * a safe halt in the irqs-enabled case, to avoid a hang when the
	 * lock info is overwritten in the irq spinlock slowpath and no
	 * spurious interrupt occurs to save us.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;
	local_irq_restore(flags);
	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);

/* Kick vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);
	for_each_cpu(cpu, &waiting_cpus) {
		const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
		if (ACCESS_ONCE(w->lock) == lock &&
		    ACCESS_ONCE(w->want) == ticket) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			kvm_kick_cpu(cpu);
			break;
		}
	}
}

/*
 * Set up pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
	pv_lock_ops.unlock_kick = kvm_unlock_kick;
}

static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */