KVM: s390: provide general purpose guest registers via kvm_run
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
/* Expand to the (offset, type) pair expected by struct kvm_stats_debugfs_item
 * for a per-vcpu statistics counter. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Statistics exported by kvm common code through debugfs, one file per
 * entry under the kvm debugfs directory.  The array is terminated by an
 * entry with a NULL name.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};
79
/* DMA-capable page holding the facility bits presented to guests;
 * allocated in kvm_s390_init() and freed in kvm_s390_exit(). */
static unsigned long long *facilities;
b0c632db
HC
81
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/*
	 * SIE is available on every s390 host that runs this code,
	 * so there is nothing to switch on.
	 */
	return 0;
}
88
/* Nothing to tear down; virtualization cannot be disabled on s390. */
void kvm_arch_hardware_disable(void *garbage)
{
}
92
b0c632db
HC
/* No per-host hardware setup is required on s390. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}
97
/* Counterpart of kvm_arch_hardware_setup(); intentionally empty. */
void kvm_arch_hardware_unsetup(void)
{
}
101
/* All s390 processors that reach this code are compatible. */
void kvm_arch_check_processor_compat(void *rtn)
{
}
105
/* Architecture hook called from kvm_init(); no s390-specific work. */
int kvm_arch_init(void *opaque)
{
	return 0;
}
110
/* Architecture hook called from kvm_exit(); nothing to undo here. */
void kvm_arch_exit(void)
{
}
114
/* Section: device related */
/*
 * ioctl handler for the /dev/kvm device node.
 * Only KVM_S390_ENABLE_SIE is supported: it switches the calling
 * process's address space into a SIE-capable layout.
 */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
123
124int kvm_dev_ioctl_check_extension(long ext)
125{
d7b0b5eb
CO
126 int r;
127
2bd0ac4e 128 switch (ext) {
d7b0b5eb 129 case KVM_CAP_S390_PSW:
b6cf8788 130 case KVM_CAP_S390_GMAP:
52e16b18 131 case KVM_CAP_SYNC_MMU:
1efd0f59
CO
132#ifdef CONFIG_KVM_S390_UCONTROL
133 case KVM_CAP_S390_UCONTROL:
134#endif
60b413c9 135 case KVM_CAP_SYNC_REGS:
d7b0b5eb
CO
136 r = 1;
137 break;
2bd0ac4e 138 default:
d7b0b5eb 139 r = 0;
2bd0ac4e 140 }
d7b0b5eb 141 return r;
b0c632db
HC
142}
143
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 *
 * Dirty page logging is not implemented on s390 yet; report success
 * without filling in any log data.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
153
/*
 * Architecture-specific VM ioctls.  The only one handled here is
 * KVM_S390_INTERRUPT, which injects a floating interrupt into the VM.
 * Returns 0 on success, -EFAULT on bad user pointer, -ENOTTY for
 * unknown ioctls.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
177
/*
 * Create the architecture side of a VM: validate the requested VM type,
 * enable SIE for the current mm, and allocate the system control area
 * (SCA), the debug feature and (for non-ucontrol VMs) the guest address
 * space (gmap).  Uses goto-based unwind so each failure frees exactly
 * what was already allocated.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* only the ucontrol bit may be set, and only by a privileged caller */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	/* per-VM debug feature area, named after the creating pid */
	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	/* ucontrol VMs manage guest address spaces per vcpu instead */
	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
231
d329c035
CB
/*
 * Tear down a vcpu: detach it from the VM-wide SCA (non-ucontrol only),
 * free its private gmap (ucontrol only), then release the SIE control
 * block and the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* clear this cpu's bit in the SCA cpu mask ... */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		/* ... and its SIE block pointer, if it is still ours */
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
251
/*
 * Destroy every vcpu of a VM, then clear the vcpu array and reset the
 * online count under kvm->lock so concurrent lookups see a consistent
 * empty state.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
267
ad8ba2cd
SY
/* No architecture events need to be flushed before VM destruction. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
271
b0c632db
HC
/*
 * Release everything kvm_arch_init_vm() set up: all vcpus, the SCA
 * page, the debug feature, and (for non-ucontrol VMs) the gmap.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
280
/* Section: vcpu related */
/*
 * Per-vcpu init called from kvm_vcpu_init().  A ucontrol vcpu gets its
 * own gmap; a regular vcpu shares the VM-wide gmap and advertises which
 * registers are synced through the kvm_run area.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	/* prefix and gprs are always valid in kvm_run->s.regs */
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX | KVM_SYNC_GPRS;
	return 0;
}
295
/* Counterpart of kvm_arch_vcpu_init(); nothing to undo here. */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
300
/*
 * Called when this vcpu is scheduled onto a host cpu: stash the host's
 * FP and access registers, install the guest's, attach the guest
 * address space and mark the vcpu running.  Mirrored by
 * kvm_arch_vcpu_put() in reverse order.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* mask out invalid FPC bits before loading guest FP state */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
311
/*
 * Called when this vcpu is scheduled away: mark it not running, detach
 * the guest address space, save guest FP/access registers and restore
 * the host's — exact inverse of kvm_arch_vcpu_load().
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
321
/*
 * Reset the vcpu to its architected initial state.
 * This equals initial cpu reset in POP (Principles of Operation), but
 * we don't switch the vcpu to ESA mode.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for control registers 0 and 14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* also clear the FPC in hardware, not just the saved copy */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
338
/*
 * One-time setup of the SIE control block and vcpu timers after
 * creation: z/Architecture mode, stopped state, interception controls,
 * the facility list page and the clock-comparator wakeup machinery.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	/* point SIE at the facility bits prepared in kvm_s390_init() */
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* advertise a KVM-specific cpu version to the guest */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
355
/*
 * Allocate and wire up a new vcpu: the vcpu structure, its SIE control
 * block, the entry in the VM-wide SCA (non-ucontrol only) and the
 * local interrupt bookkeeping.  Returns the vcpu or ERR_PTR on failure;
 * goto-based unwind frees partial allocations.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	/* the SIE block must live in its own zeroed page */
	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
		get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		/* regular VMs always have an SCA; missing one is a bug */
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	/* publish this vcpu's local interrupt state under the float lock */
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
415
b0c632db
HC
/*
 * kvm common code refers to this symbol but never invokes it on s390;
 * reaching it is therefore a bug.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	BUG();
	return 0;
}
422
/* KVM_S390_INITIAL_RESET handler: perform the reset; always succeeds. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
428
/*
 * KVM_SET_REGS: copy all 16 general purpose registers from userspace
 * into the synced-register area shared via kvm_run.
 */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
434
/*
 * KVM_GET_REGS: copy all 16 general purpose registers from the
 * synced-register area in kvm_run out to userspace.
 */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
440
/*
 * KVM_SET_SREGS: install access and control registers from userspace.
 * The access registers are also loaded into hardware immediately so
 * the next guest entry uses them.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->arch.guest_acrs);
	return 0;
}
449
/*
 * KVM_GET_SREGS: copy the guest's access and control registers out to
 * userspace.
 */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
457
/*
 * KVM_SET_FPU: install floating point registers and FP control from
 * userspace, and load them into hardware immediately.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}
465
/*
 * KVM_GET_FPU: copy the guest's floating point registers and FP
 * control out to userspace.
 */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
472
473static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
474{
475 int rc = 0;
476
9e6dabef 477 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 478 rc = -EBUSY;
d7b0b5eb
CO
479 else {
480 vcpu->run->psw_mask = psw.mask;
481 vcpu->run->psw_addr = psw.addr;
482 }
b0c632db
HC
483 return rc;
484}
485
/* KVM_TRANSLATE is not implemented on s390 yet. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}
491
d0bfb940
JK
/* Guest debugging support is not implemented on s390 yet. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}
497
62d9f0db
MT
/* MP state reporting is not implemented on s390 yet. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
503
/* MP state setting is not implemented on s390 yet. */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
509
/*
 * Enter the guest once via the SIE instruction and return its result.
 * Registers 14/15 are mirrored between kvm_run and the SIE block's
 * gg14/gg15 save area around the entry.  A fault in the SIE
 * instruction itself becomes SIE_INTERCEPT_UCONTROL for ucontrol VMs
 * or an injected addressing exception (and rc 0) otherwise.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	/* 16 bytes = guest r14 and r15 into the SIE save area */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	/* handle a pending machine check before entering the guest */
	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	/* kvm_guest_enter() must run with interrupts disabled */
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	/* copy guest r14/r15 back to the synced register area */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
550
/*
 * KVM_RUN: the main vcpu execution loop.  Installs state handed in via
 * kvm_run (PSW, dirty synced registers), repeatedly enters the guest
 * and handles intercepts in-kernel until userspace attention, a signal
 * or an error is needed, then writes the exit state back into kvm_run.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only these exit reasons may re-enter KVM_RUN on s390 */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	/* consume a userspace-modified prefix register, if flagged */
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	/* a ucontrol fault is reported to userspace instead of handled */
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* mirror the final guest state back to userspace */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
637
092670cd 638static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
639 unsigned long n, int prefix)
640{
641 if (prefix)
642 return copy_to_guest(vcpu, guestdest, from, n);
643 else
644 return copy_to_guest_absolute(vcpu, guestdest, from, n);
645}
646
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * Writes the architected save area (FP regs, GPRs, PSW, prefix, FPC,
 * TOD programmable reg, cpu timer, clock comparator, access and
 * control registers) to guest memory.  Returns 0 or -EFAULT.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		/* flag z/Arch mode at absolute 163, store to fixed area */
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		/* same, but through the prefixed mapping */
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
714
b0c632db
HC
/*
 * Architecture-specific vcpu ioctls: interrupt injection, store
 * status, initial PSW/reset, the ucontrol address space map/unmap
 * operations and manual fault resolution.  Returns 0 or a negative
 * errno; -ENOTTY for unknown ioctls.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* ucas operations only make sense on ucontrol VMs */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* gmap_fault() returns an address or an error value */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
794
5b1c1493
CO
/*
 * mmap fault handler for the vcpu fd.  For ucontrol VMs, the page at
 * KVM_S390_SIE_PAGE_OFFSET exposes the SIE control block to userspace;
 * everything else gets SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
807
/* Section: memory related */
/*
 * Validate a memory slot before it is committed.
 */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	/* start and size must be 1 MB (segment) aligned */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
839
840void kvm_arch_commit_memory_region(struct kvm *kvm,
841 struct kvm_userspace_memory_region *mem,
842 struct kvm_memory_slot old,
843 int user_alloc)
844{
f7850c92 845 int rc;
f7784b8e 846
598841ca
CO
847
848 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
849 mem->guest_phys_addr, mem->memory_size);
850 if (rc)
f7850c92 851 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 852 return;
b0c632db
HC
853}
854
34d4cb8f
MT
/* No shadow page tables on s390; nothing to flush. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
858
b0c632db
HC
/*
 * Module init: register with kvm common code, then build the facility
 * list page handed to guests via the SIE block.
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	/* copy the host's STFLE bits, then mask to the supported subset */
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}
881
/*
 * Module exit: free the facility page allocated in kvm_s390_init()
 * and unregister from kvm common code.
 */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}
887
/* Register module entry and exit points. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
This page took 0.582525 seconds and 5 git commands to generate.