KVM: Fix write protection race during dirty logging
arch/s390/kvm/kvm-s390.c

/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
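
/*
 * A minimal illustration (not part of the original source): with the
 * macro above, an entry such as
 *
 *      { "exit_null", VCPU_STAT(exit_null) },
 *
 * expands to
 *
 *      { "exit_null", offsetof(struct kvm_vcpu, stat.exit_null), KVM_STAT_VCPU },
 *
 * i.e. a debugfs name, the offset of the counter within struct kvm_vcpu,
 * and the statistics type consumed by the common KVM debugfs code.
 */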
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}
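
/*
 * Usage sketch (an assumption, not part of this file): userspace issues
 * this ioctl once on the /dev/kvm fd before creating virtual machines,
 * e.g.:
 *
 *      int kvm_fd = open("/dev/kvm", O_RDWR);
 *      ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 *
 * so that the host address space is prepared for running the SIE
 * instruction.
 */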

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}
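
/*
 * Usage sketch (an assumption, not part of this file): userspace probes
 * a capability on the /dev/kvm fd and acts on the 0/1 answer, e.g.:
 *
 *      if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_REGS) == 1)
 *              ... use the kvm_run->s.regs synchronization ...
 */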

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}
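
/*
 * Usage sketch (an assumption, not part of this file; "param" is a
 * hypothetical value): injecting a floating interrupt through the VM fd:
 *
 *      struct kvm_s390_interrupt s390int = {
 *              .type   = KVM_S390_INT_VIRTIO,
 *              .parm   = 0,
 *              .parm64 = param,
 *      };
 *      ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 */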

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
        }
        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}
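
/*
 * Usage sketch (an assumption, not part of this file): a user-controlled
 * VM is requested by passing the type flag to KVM_CREATE_VM:
 *
 *      int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 *
 * which, as checked above, needs CAP_SYS_ADMIN and a kernel built with
 * CONFIG_KVM_S390_UCONTROL; ordinary guests pass type 0.
 */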

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb = 6;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        vcpu->arch.sie_block->fac = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
        if (rc) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                        rc = 0;
                }
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}
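
/*
 * Usage sketch (an assumption, not part of this file): the canonical
 * userspace loop drives a vcpu fd with KVM_RUN and dispatches on the
 * exit reason; mmap_size would come from KVM_GET_VCPU_MMAP_SIZE:
 *
 *      struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *                                 MAP_SHARED, vcpu_fd, 0);
 *      for (;;) {
 *              ioctl(vcpu_fd, KVM_RUN, 0);
 *              if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *                      ... emulate the intercepted instruction ...
 *      }
 */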

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                                       ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}
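
/*
 * Usage sketch (an assumption, not part of this file): for ucontrol VMs,
 * userspace can reach the vcpu's SIE control block by mmap()ing the vcpu
 * fd at the page offset tested above:
 *
 *      void *sie = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                       vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * 4096);
 */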

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           segment (1 MB) boundary in userland and which has to end at a
           segment boundary, as enforced by the 0xfffff masks below. The
           memory in userland is ok to be fragmented into various different
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}
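
/*
 * Usage sketch (an assumption, not part of this file; "backing" and
 * "mem_size" are hypothetical): the single slot is registered on the VM
 * fd with guest address zero:
 *
 *      struct kvm_userspace_memory_region mem = {
 *              .slot            = 0,
 *              .guest_phys_addr = 0,
 *              .userspace_addr  = (unsigned long) backing,  // 1 MB aligned
 *              .memory_size     = mem_size,                 // 1 MB multiple
 *      };
 *      ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */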

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                              mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);