KVM: provide synchronous registers in kvm_run
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 38 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0
CB
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
388186bc 75 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 76 { "diagnose_44", VCPU_STAT(diagnose_44) },
b0c632db
HC
77 { NULL }
78};
79
ef50f7ac 80static unsigned long long *facilities;
b0c632db
HC
81
82/* Section: not file related */
/*
 * Architecture hooks for hardware setup. On s390 SIE is always
 * available, so all of these are no-ops.
 */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
	/* nothing to undo - enable was a no-op */
}

int kvm_arch_hardware_setup(void)
{
	/* no global hardware state to prepare */
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	/* no per-cpu compatibility check needed */
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
114
115/* Section: device related */
116long kvm_arch_dev_ioctl(struct file *filp,
117 unsigned int ioctl, unsigned long arg)
118{
119 if (ioctl == KVM_S390_ENABLE_SIE)
120 return s390_enable_sie();
121 return -EINVAL;
122}
123
124int kvm_dev_ioctl_check_extension(long ext)
125{
d7b0b5eb
CO
126 int r;
127
2bd0ac4e 128 switch (ext) {
d7b0b5eb 129 case KVM_CAP_S390_PSW:
b6cf8788 130 case KVM_CAP_S390_GMAP:
52e16b18 131 case KVM_CAP_SYNC_MMU:
1efd0f59
CO
132#ifdef CONFIG_KVM_S390_UCONTROL
133 case KVM_CAP_S390_UCONTROL:
134#endif
d7b0b5eb
CO
135 r = 1;
136 break;
2bd0ac4e 137 default:
d7b0b5eb 138 r = 0;
2bd0ac4e 139 }
d7b0b5eb 140 return r;
b0c632db
HC
141}
142
143/* Section: vm related */
144/*
145 * Get (and clear) the dirty memory log for a memory slot.
146 */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390 here; report success
 * with an empty log.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
152
/*
 * VM-level ioctl dispatcher. Only KVM_S390_INTERRUPT (inject a
 * floating interrupt into the VM) is handled here; unknown ioctls
 * return -ENOTTY so common code can report them properly.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
176
/*
 * Initialize a new VM: validate the requested vm type, enable SIE
 * for this mm, allocate the system control area (sca) and the debug
 * feature, and set up the guest address space (gmap) unless this is
 * a user-controlled (ucontrol) VM. Uses goto-based unwind on error.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* only the ucontrol flag is a valid type bit, and it needs CAP_SYS_ADMIN */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol guests manage their address space from userspace */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
230
/*
 * Tear down one vcpu: detach it from the system control area (for
 * non-ucontrol guests), free its private gmap (ucontrol only), and
 * release the sie control block and the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* clear this cpu's bit in the sca and unhook its sie block */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();	/* make the sca update visible before freeing */

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
250
/*
 * Destroy all vcpus of a VM and reset the online vcpu bookkeeping
 * under kvm->lock.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
266
ad8ba2cd
SY
void kvm_arch_sync_events(struct kvm *kvm)
{
	/* no arch-specific events to flush on s390 */
}

/*
 * Free all VM resources: vcpus first, then the system control area,
 * the debug feature and (for non-ucontrol guests) the guest gmap.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
279
280/* Section: vcpu related */
/* Section: vcpu related */
/*
 * Per-vcpu init: ucontrol guests get a private gmap per vcpu,
 * everyone else shares the VM-wide gmap.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}
298
/*
 * Called when the vcpu is scheduled in: save the host's FP and access
 * registers, load the guest's, enable its address space and mark it
 * running. kvm_arch_vcpu_put() is the exact mirror for scheduling out.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* mask out invalid fpc bits before loading */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
319
/*
 * Put the vcpu into its architected initial state: clear psw, timers
 * and control registers, then set the architected reset values for
 * cr0/cr14 and the fp control.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for cr0 and cr14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* also reset the hardware fpc right away */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
336
/*
 * One-time sie control block setup for a new vcpu: initial cpuflags
 * (z/Arch mode, stopped), execution controls, the facility list page
 * and the clock-comparator wakeup timer.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	/* point the guest's facility list at our masked copy */
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* 0xff marks the cpu id as a kvm guest */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
353
/*
 * Create a vcpu with the given id: allocate the vcpu structure and a
 * zeroed page for its sie control block; for non-ucontrol guests wire
 * the vcpu into the system control area (sca), then hook it up to the
 * local/floating interrupt machinery. Returns ERR_PTR on failure.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
		get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		/* ucontrol guests have no sca; everyone else must have one */
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		/* split the sca address into high/low words for the sie block */
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
413
b0c632db
HC
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

/* Handler for the KVM_S390_INITIAL_RESET vcpu ioctl. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
426
/*
 * Register accessor ioctls: copy general purpose, access/control and
 * floating point register state between userspace structures and the
 * vcpu. Setters also reload the live registers where needed so the
 * new values take effect immediately.
 */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	/* make the new access registers visible right away */
	restore_access_regs(vcpu->arch.guest_acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	/* make the new fp registers visible right away */
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
470
/*
 * Set the initial psw of a stopped vcpu; the new psw is written into
 * kvm_run and picked up on the next KVM_RUN. Fails with -EBUSY if the
 * vcpu is currently running.
 */
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
483
/* The following vcpu ioctls are not (yet) supported on s390. */

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
507
/*
 * Enter SIE for one run: handle pending machine checks, deliver
 * pending interrupts (non-ucontrol only), execute the guest via
 * sie64a() and translate a fault of the SIE instruction itself.
 * Returns 0, a negative error, or SIE_INTERCEPT_UCONTROL.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	/* gprs 14/15 (16 bytes) are shadowed in the sie block while in SIE */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol guests get their interrupts injected from userspace */
	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			/* hand the fault to userspace via the caller */
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
548
/*
 * The KVM_RUN ioctl: loop between guest execution (__vcpu_run) and
 * in-kernel intercept handling until a signal arrives or the
 * intercept needs userspace. The guest psw is synchronized between
 * kvm_run and the sie control block on entry and exit.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only these exit reasons are valid on (re)entry */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	/* sync the guest psw in from userspace */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			/* ucontrol: every intercept goes to userspace */
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		/* translate the sie fault into a ucontrol exit for userspace */
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* sync the guest psw back out to userspace */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
630
092670cd 631static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
632 unsigned long n, int prefix)
633{
634 if (prefix)
635 return copy_to_guest(vcpu, guestdest, from, n);
636 else
637 return copy_to_guest_absolute(vcpu, guestdest, from, n);
638}
639
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/* byte written to absolute address 163 marks the save-area format
	 * (NOTE(review): per the architected store-status definition) */
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/* store each register group at its offset within the save area */
	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
707
b0c632db
HC
/*
 * Per-vcpu ioctl dispatcher: interrupt injection, store status,
 * initial psw/reset and the ucontrol address-space mapping calls.
 * Unknown ioctls yield -ENOTTY.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* only valid for user controlled virtual machines */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* only valid for user controlled virtual machines */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* resolve a guest page fault at address arg */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
787
5b1c1493
CO
/*
 * mmap fault handler for the vcpu fd: for ucontrol guests, the sie
 * control block page is exposed to userspace at KVM_S390_SIE_PAGE_OFFSET.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
800
b0c632db 801/* Section: memory related */
f7784b8e
MT
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	/* only slot 0 is supported */
	if (mem->slot)
		return -EINVAL;

	/* the slot must start at guest address zero */
	if (mem->guest_phys_addr)
		return -EINVAL;

	/* userspace address and size must be 1 MB aligned */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
832
/*
 * Map the committed userspace memory into the guest address space
 * via the gmap. Failure is only logged - the common code provides no
 * way to undo the commit here.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;


	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	/* nothing to do on s390 */
}
851
b0c632db
HC
/*
 * Module init: register with common KVM code and build the facility
 * list page that is presented to guests (host facilities masked down
 * to what KVM supports).
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	/* take the first 16 bytes of the host facility list, then mask it */
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}
874
/* Module exit: free the facility page and unregister from KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
This page took 0.378423 seconds and 5 git commands to generate.