KVM: s390: KVM_GET/SET_ONEREG for s390
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
a0616cde 30#include <asm/switch_to.h>
1526bf9c 31#include <asm/sclp.h>
8f2abe6a 32#include "kvm-s390.h"
b0c632db
HC
33#include "gaccess.h"
34
35#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
36
37struct kvm_stats_debugfs_item debugfs_entries[] = {
38 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 39 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
40 { "exit_validity", VCPU_STAT(exit_validity) },
41 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
42 { "exit_external_request", VCPU_STAT(exit_external_request) },
43 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
44 { "exit_instruction", VCPU_STAT(exit_instruction) },
45 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
46 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 47 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
48 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
49 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 50 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
51 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
52 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
53 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
54 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
55 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
56 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
57 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
58 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
59 { "instruction_spx", VCPU_STAT(instruction_spx) },
60 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
61 { "instruction_stap", VCPU_STAT(instruction_stap) },
62 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
63 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
64 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
65 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
66 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 67 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 68 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 69 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 70 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0
CB
71 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
72 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
73 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
74 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
75 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
388186bc 76 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 77 { "diagnose_44", VCPU_STAT(diagnose_44) },
41628d33 78 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
b0c632db
HC
79 { NULL }
80};
81
/* facility list shown to guests; allocated and masked in kvm_s390_init() */
static unsigned long long *facilities;
b0c632db
HC
83
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
	/* nothing to do on s390 */
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
116
117/* Section: device related */
118long kvm_arch_dev_ioctl(struct file *filp,
119 unsigned int ioctl, unsigned long arg)
120{
121 if (ioctl == KVM_S390_ENABLE_SIE)
122 return s390_enable_sie();
123 return -EINVAL;
124}
125
126int kvm_dev_ioctl_check_extension(long ext)
127{
d7b0b5eb
CO
128 int r;
129
2bd0ac4e 130 switch (ext) {
d7b0b5eb 131 case KVM_CAP_S390_PSW:
b6cf8788 132 case KVM_CAP_S390_GMAP:
52e16b18 133 case KVM_CAP_SYNC_MMU:
1efd0f59
CO
134#ifdef CONFIG_KVM_S390_UCONTROL
135 case KVM_CAP_S390_UCONTROL:
136#endif
60b413c9 137 case KVM_CAP_SYNC_REGS:
14eebd91 138 case KVM_CAP_ONE_REG:
d7b0b5eb
CO
139 r = 1;
140 break;
e726b1bd
CB
141 case KVM_CAP_NR_VCPUS:
142 case KVM_CAP_MAX_VCPUS:
143 r = KVM_MAX_VCPUS;
144 break;
1526bf9c
CB
145 case KVM_CAP_S390_COW:
146 r = sclp_get_fac85() & 0x2;
147 break;
2bd0ac4e 148 default:
d7b0b5eb 149 r = 0;
2bd0ac4e 150 }
d7b0b5eb 151 return r;
b0c632db
HC
152}
153
154/* Section: vm related */
155/*
156 * Get (and clear) the dirty memory log for a memory slot.
157 */
158int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
159 struct kvm_dirty_log *log)
160{
161 return 0;
162}
163
164long kvm_arch_vm_ioctl(struct file *filp,
165 unsigned int ioctl, unsigned long arg)
166{
167 struct kvm *kvm = filp->private_data;
168 void __user *argp = (void __user *)arg;
169 int r;
170
171 switch (ioctl) {
ba5c1e9b
CO
172 case KVM_S390_INTERRUPT: {
173 struct kvm_s390_interrupt s390int;
174
175 r = -EFAULT;
176 if (copy_from_user(&s390int, argp, sizeof(s390int)))
177 break;
178 r = kvm_s390_inject_vm(kvm, &s390int);
179 break;
180 }
b0c632db 181 default:
367e1319 182 r = -ENOTTY;
b0c632db
HC
183 }
184
185 return r;
186}
187
e08b9637 188int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 189{
b0c632db
HC
190 int rc;
191 char debug_name[16];
192
e08b9637
CO
193 rc = -EINVAL;
194#ifdef CONFIG_KVM_S390_UCONTROL
195 if (type & ~KVM_VM_S390_UCONTROL)
196 goto out_err;
197 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
198 goto out_err;
199#else
200 if (type)
201 goto out_err;
202#endif
203
b0c632db
HC
204 rc = s390_enable_sie();
205 if (rc)
d89f5eff 206 goto out_err;
b0c632db 207
b290411a
CO
208 rc = -ENOMEM;
209
b0c632db
HC
210 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
211 if (!kvm->arch.sca)
d89f5eff 212 goto out_err;
b0c632db
HC
213
214 sprintf(debug_name, "kvm-%u", current->pid);
215
216 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
217 if (!kvm->arch.dbf)
218 goto out_nodbf;
219
ba5c1e9b
CO
220 spin_lock_init(&kvm->arch.float_int.lock);
221 INIT_LIST_HEAD(&kvm->arch.float_int.list);
222
b0c632db
HC
223 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
224 VM_EVENT(kvm, 3, "%s", "vm created");
225
e08b9637
CO
226 if (type & KVM_VM_S390_UCONTROL) {
227 kvm->arch.gmap = NULL;
228 } else {
229 kvm->arch.gmap = gmap_alloc(current->mm);
230 if (!kvm->arch.gmap)
231 goto out_nogmap;
232 }
d89f5eff 233 return 0;
598841ca
CO
234out_nogmap:
235 debug_unregister(kvm->arch.dbf);
b0c632db
HC
236out_nodbf:
237 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
238out_err:
239 return rc;
b0c632db
HC
240}
241
d329c035
CB
242void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
243{
244 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
58f9460b
CO
245 if (!kvm_is_ucontrol(vcpu->kvm)) {
246 clear_bit(63 - vcpu->vcpu_id,
247 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
248 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
249 (__u64) vcpu->arch.sie_block)
250 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
251 }
abf4a71e 252 smp_mb();
27e0393f
CO
253
254 if (kvm_is_ucontrol(vcpu->kvm))
255 gmap_free(vcpu->arch.gmap);
256
d329c035 257 free_page((unsigned long)(vcpu->arch.sie_block));
6692cef3 258 kvm_vcpu_uninit(vcpu);
d329c035
CB
259 kfree(vcpu);
260}
261
262static void kvm_free_vcpus(struct kvm *kvm)
263{
264 unsigned int i;
988a2cae 265 struct kvm_vcpu *vcpu;
d329c035 266
988a2cae
GN
267 kvm_for_each_vcpu(i, vcpu, kvm)
268 kvm_arch_vcpu_destroy(vcpu);
269
270 mutex_lock(&kvm->lock);
271 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
272 kvm->vcpus[i] = NULL;
273
274 atomic_set(&kvm->online_vcpus, 0);
275 mutex_unlock(&kvm->lock);
d329c035
CB
276}
277
ad8ba2cd
SY
278void kvm_arch_sync_events(struct kvm *kvm)
279{
280}
281
b0c632db
HC
282void kvm_arch_destroy_vm(struct kvm *kvm)
283{
d329c035 284 kvm_free_vcpus(kvm);
b0c632db 285 free_page((unsigned long)(kvm->arch.sca));
d329c035 286 debug_unregister(kvm->arch.dbf);
27e0393f
CO
287 if (!kvm_is_ucontrol(kvm))
288 gmap_free(kvm->arch.gmap);
b0c632db
HC
289}
290
291/* Section: vcpu related */
292int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
293{
27e0393f
CO
294 if (kvm_is_ucontrol(vcpu->kvm)) {
295 vcpu->arch.gmap = gmap_alloc(current->mm);
296 if (!vcpu->arch.gmap)
297 return -ENOMEM;
298 return 0;
299 }
300
598841ca 301 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
59674c1a
CB
302 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
303 KVM_SYNC_GPRS |
9eed0735
CB
304 KVM_SYNC_ACRS |
305 KVM_SYNC_CRS;
b0c632db
HC
306 return 0;
307}
308
309void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
310{
6692cef3 311 /* Nothing todo */
b0c632db
HC
312}
313
314void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
315{
316 save_fp_regs(&vcpu->arch.host_fpregs);
317 save_access_regs(vcpu->arch.host_acrs);
318 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
319 restore_fp_regs(&vcpu->arch.guest_fpregs);
59674c1a 320 restore_access_regs(vcpu->run->s.regs.acrs);
480e5926 321 gmap_enable(vcpu->arch.gmap);
9e6dabef 322 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
b0c632db
HC
323}
324
325void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
326{
9e6dabef 327 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
480e5926 328 gmap_disable(vcpu->arch.gmap);
b0c632db 329 save_fp_regs(&vcpu->arch.guest_fpregs);
59674c1a 330 save_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
331 restore_fp_regs(&vcpu->arch.host_fpregs);
332 restore_access_regs(vcpu->arch.host_acrs);
333}
334
335static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
336{
337 /* this equals initial cpu reset in pop, but we don't switch to ESA */
338 vcpu->arch.sie_block->gpsw.mask = 0UL;
339 vcpu->arch.sie_block->gpsw.addr = 0UL;
8d26cf7b 340 kvm_s390_set_prefix(vcpu, 0);
b0c632db
HC
341 vcpu->arch.sie_block->cputm = 0UL;
342 vcpu->arch.sie_block->ckc = 0UL;
343 vcpu->arch.sie_block->todpr = 0;
344 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
345 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
346 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
347 vcpu->arch.guest_fpregs.fpc = 0;
348 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
349 vcpu->arch.sie_block->gbea = 1;
350}
351
352int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
353{
9e6dabef
CH
354 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
355 CPUSTAT_SM |
356 CPUSTAT_STOPPED);
fc34531d 357 vcpu->arch.sie_block->ecb = 6;
b0c632db 358 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 359 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
360 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
361 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
362 (unsigned long) vcpu);
363 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 364 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 365 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
366 return 0;
367}
368
369struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
370 unsigned int id)
371{
4d47555a
CO
372 struct kvm_vcpu *vcpu;
373 int rc = -EINVAL;
374
375 if (id >= KVM_MAX_VCPUS)
376 goto out;
377
378 rc = -ENOMEM;
b0c632db 379
4d47555a 380 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
b0c632db 381 if (!vcpu)
4d47555a 382 goto out;
b0c632db 383
180c12fb
CB
384 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
385 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
386
387 if (!vcpu->arch.sie_block)
388 goto out_free_cpu;
389
390 vcpu->arch.sie_block->icpua = id;
58f9460b
CO
391 if (!kvm_is_ucontrol(kvm)) {
392 if (!kvm->arch.sca) {
393 WARN_ON_ONCE(1);
394 goto out_free_cpu;
395 }
396 if (!kvm->arch.sca->cpu[id].sda)
397 kvm->arch.sca->cpu[id].sda =
398 (__u64) vcpu->arch.sie_block;
399 vcpu->arch.sie_block->scaoh =
400 (__u32)(((__u64)kvm->arch.sca) >> 32);
401 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
402 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
403 }
b0c632db 404
ba5c1e9b
CO
405 spin_lock_init(&vcpu->arch.local_int.lock);
406 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
407 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 408 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
409 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
410 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 411 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 412 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 413
b0c632db
HC
414 rc = kvm_vcpu_init(vcpu, kvm, id);
415 if (rc)
7b06bf2f 416 goto out_free_sie_block;
b0c632db
HC
417 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
418 vcpu->arch.sie_block);
419
b0c632db 420 return vcpu;
7b06bf2f
WY
421out_free_sie_block:
422 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db
HC
423out_free_cpu:
424 kfree(vcpu);
4d47555a 425out:
b0c632db
HC
426 return ERR_PTR(rc);
427}
428
b0c632db
HC
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
442
14eebd91
CO
443static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
444 struct kvm_one_reg *reg)
445{
446 int r = -EINVAL;
447
448 switch (reg->id) {
449 default:
450 break;
451 }
452
453 return r;
454}
455
456static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
457 struct kvm_one_reg *reg)
458{
459 int r = -EINVAL;
460
461 switch (reg->id) {
462 default:
463 break;
464 }
465
466 return r;
467}
b6d33834 468
b0c632db
HC
469static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
470{
b0c632db 471 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
472 return 0;
473}
474
475int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
476{
5a32c1af 477 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
478 return 0;
479}
480
481int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
482{
5a32c1af 483 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
484 return 0;
485}
486
487int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
488 struct kvm_sregs *sregs)
489{
59674c1a 490 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 491 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 492 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
493 return 0;
494}
495
496int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
497 struct kvm_sregs *sregs)
498{
59674c1a 499 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 500 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
501 return 0;
502}
503
504int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
505{
b0c632db 506 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
85175587 507 vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
7eef87dc 508 restore_fp_regs(&vcpu->arch.guest_fpregs);
b0c632db
HC
509 return 0;
510}
511
512int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
513{
b0c632db
HC
514 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
515 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
516 return 0;
517}
518
519static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
520{
521 int rc = 0;
522
9e6dabef 523 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 524 rc = -EBUSY;
d7b0b5eb
CO
525 else {
526 vcpu->run->psw_mask = psw.mask;
527 vcpu->run->psw_addr = psw.addr;
528 }
b0c632db
HC
529 return rc;
530}
531
532int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
533 struct kvm_translation *tr)
534{
535 return -EINVAL; /* not implemented yet */
536}
537
d0bfb940
JK
538int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
539 struct kvm_guest_debug *dbg)
b0c632db
HC
540{
541 return -EINVAL; /* not implemented yet */
542}
543
62d9f0db
MT
544int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
545 struct kvm_mp_state *mp_state)
546{
547 return -EINVAL; /* not implemented yet */
548}
549
550int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
551 struct kvm_mp_state *mp_state)
552{
553 return -EINVAL; /* not implemented yet */
554}
555
e168bf8d 556static int __vcpu_run(struct kvm_vcpu *vcpu)
b0c632db 557{
e168bf8d
CO
558 int rc;
559
5a32c1af 560 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
b0c632db
HC
561
562 if (need_resched())
563 schedule();
564
71cde587
CB
565 if (test_thread_flag(TIF_MCCK_PENDING))
566 s390_handle_mcck();
567
d6b6d166
CO
568 if (!kvm_is_ucontrol(vcpu->kvm))
569 kvm_s390_deliver_pending_interrupts(vcpu);
0ff31867 570
b0c632db
HC
571 vcpu->arch.sie_block->icptcode = 0;
572 local_irq_disable();
573 kvm_guest_enter();
574 local_irq_enable();
575 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
576 atomic_read(&vcpu->arch.sie_block->cpuflags));
5a32c1af 577 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
e168bf8d
CO
578 if (rc) {
579 if (kvm_is_ucontrol(vcpu->kvm)) {
580 rc = SIE_INTERCEPT_UCONTROL;
581 } else {
582 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
583 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
584 rc = 0;
585 }
1f0d0f09 586 }
b0c632db
HC
587 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
588 vcpu->arch.sie_block->icptcode);
589 local_irq_disable();
590 kvm_guest_exit();
591 local_irq_enable();
592
5a32c1af 593 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
e168bf8d 594 return rc;
b0c632db
HC
595}
596
597int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
598{
8f2abe6a 599 int rc;
b0c632db
HC
600 sigset_t sigsaved;
601
9ace903d 602rerun_vcpu:
b0c632db
HC
603 if (vcpu->sigset_active)
604 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
605
9e6dabef 606 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
b0c632db 607
ba5c1e9b
CO
608 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
609
8f2abe6a
CB
610 switch (kvm_run->exit_reason) {
611 case KVM_EXIT_S390_SIEIC:
8f2abe6a 612 case KVM_EXIT_UNKNOWN:
9ace903d 613 case KVM_EXIT_INTR:
8f2abe6a 614 case KVM_EXIT_S390_RESET:
e168bf8d 615 case KVM_EXIT_S390_UCONTROL:
8f2abe6a
CB
616 break;
617 default:
618 BUG();
619 }
620
d7b0b5eb
CO
621 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
622 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
60b413c9
CB
623 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
624 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
625 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
626 }
9eed0735
CB
627 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
628 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
629 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
630 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
631 }
d7b0b5eb 632
dab4079d 633 might_fault();
8f2abe6a
CB
634
635 do {
e168bf8d
CO
636 rc = __vcpu_run(vcpu);
637 if (rc)
638 break;
c0d744a9
CO
639 if (kvm_is_ucontrol(vcpu->kvm))
640 rc = -EOPNOTSUPP;
641 else
642 rc = kvm_handle_sie_intercept(vcpu);
8f2abe6a
CB
643 } while (!signal_pending(current) && !rc);
644
9ace903d
CE
645 if (rc == SIE_INTERCEPT_RERUNVCPU)
646 goto rerun_vcpu;
647
b1d16c49
CE
648 if (signal_pending(current) && !rc) {
649 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 650 rc = -EINTR;
b1d16c49 651 }
8f2abe6a 652
e168bf8d
CO
653#ifdef CONFIG_KVM_S390_UCONTROL
654 if (rc == SIE_INTERCEPT_UCONTROL) {
655 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
656 kvm_run->s390_ucontrol.trans_exc_code =
657 current->thread.gmap_addr;
658 kvm_run->s390_ucontrol.pgm_code = 0x10;
659 rc = 0;
660 }
661#endif
662
b8e660b8 663 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
664 /* intercept cannot be handled in-kernel, prepare kvm-run */
665 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
666 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
667 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
668 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
669 rc = 0;
670 }
671
672 if (rc == -EREMOTE) {
673 /* intercept was handled, but userspace support is needed
674 * kvm_run has been prepared by the handler */
675 rc = 0;
676 }
b0c632db 677
d7b0b5eb
CO
678 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
679 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
60b413c9 680 kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
9eed0735 681 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
d7b0b5eb 682
b0c632db
HC
683 if (vcpu->sigset_active)
684 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
685
b0c632db 686 vcpu->stat.exit_userspace++;
7e8e6ab4 687 return rc;
b0c632db
HC
688}
689
092670cd 690static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
691 unsigned long n, int prefix)
692{
693 if (prefix)
694 return copy_to_guest(vcpu, guestdest, from, n);
695 else
696 return copy_to_guest_absolute(vcpu, guestdest, from, n);
697}
698
699/*
700 * store status at address
701 * we use have two special cases:
702 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
703 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
704 */
971eb77f 705int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
b0c632db 706{
092670cd 707 unsigned char archmode = 1;
b0c632db
HC
708 int prefix;
709
710 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
711 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
712 return -EFAULT;
713 addr = SAVE_AREA_BASE;
714 prefix = 0;
715 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
716 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
717 return -EFAULT;
718 addr = SAVE_AREA_BASE;
719 prefix = 1;
720 } else
721 prefix = 0;
722
f64ca217 723 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
b0c632db
HC
724 vcpu->arch.guest_fpregs.fprs, 128, prefix))
725 return -EFAULT;
726
f64ca217 727 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
5a32c1af 728 vcpu->run->s.regs.gprs, 128, prefix))
b0c632db
HC
729 return -EFAULT;
730
f64ca217 731 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
b0c632db
HC
732 &vcpu->arch.sie_block->gpsw, 16, prefix))
733 return -EFAULT;
734
f64ca217 735 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
b0c632db
HC
736 &vcpu->arch.sie_block->prefix, 4, prefix))
737 return -EFAULT;
738
739 if (__guestcopy(vcpu,
f64ca217 740 addr + offsetof(struct save_area, fp_ctrl_reg),
b0c632db
HC
741 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
742 return -EFAULT;
743
f64ca217 744 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
b0c632db
HC
745 &vcpu->arch.sie_block->todpr, 4, prefix))
746 return -EFAULT;
747
f64ca217 748 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
b0c632db
HC
749 &vcpu->arch.sie_block->cputm, 8, prefix))
750 return -EFAULT;
751
f64ca217 752 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
b0c632db
HC
753 &vcpu->arch.sie_block->ckc, 8, prefix))
754 return -EFAULT;
755
f64ca217 756 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
59674c1a 757 &vcpu->run->s.regs.acrs, 64, prefix))
b0c632db
HC
758 return -EFAULT;
759
760 if (__guestcopy(vcpu,
f64ca217 761 addr + offsetof(struct save_area, ctrl_regs),
b0c632db
HC
762 &vcpu->arch.sie_block->gcr, 128, prefix))
763 return -EFAULT;
764 return 0;
765}
766
b0c632db
HC
767long kvm_arch_vcpu_ioctl(struct file *filp,
768 unsigned int ioctl, unsigned long arg)
769{
770 struct kvm_vcpu *vcpu = filp->private_data;
771 void __user *argp = (void __user *)arg;
bc923cc9 772 long r;
b0c632db 773
93736624
AK
774 switch (ioctl) {
775 case KVM_S390_INTERRUPT: {
ba5c1e9b
CO
776 struct kvm_s390_interrupt s390int;
777
93736624 778 r = -EFAULT;
ba5c1e9b 779 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624
AK
780 break;
781 r = kvm_s390_inject_vcpu(vcpu, &s390int);
782 break;
ba5c1e9b 783 }
b0c632db 784 case KVM_S390_STORE_STATUS:
bc923cc9
AK
785 r = kvm_s390_vcpu_store_status(vcpu, arg);
786 break;
b0c632db
HC
787 case KVM_S390_SET_INITIAL_PSW: {
788 psw_t psw;
789
bc923cc9 790 r = -EFAULT;
b0c632db 791 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
792 break;
793 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
794 break;
b0c632db
HC
795 }
796 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
797 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
798 break;
14eebd91
CO
799 case KVM_SET_ONE_REG:
800 case KVM_GET_ONE_REG: {
801 struct kvm_one_reg reg;
802 r = -EFAULT;
803 if (copy_from_user(&reg, argp, sizeof(reg)))
804 break;
805 if (ioctl == KVM_SET_ONE_REG)
806 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
807 else
808 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
809 break;
810 }
27e0393f
CO
811#ifdef CONFIG_KVM_S390_UCONTROL
812 case KVM_S390_UCAS_MAP: {
813 struct kvm_s390_ucas_mapping ucasmap;
814
815 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
816 r = -EFAULT;
817 break;
818 }
819
820 if (!kvm_is_ucontrol(vcpu->kvm)) {
821 r = -EINVAL;
822 break;
823 }
824
825 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
826 ucasmap.vcpu_addr, ucasmap.length);
827 break;
828 }
829 case KVM_S390_UCAS_UNMAP: {
830 struct kvm_s390_ucas_mapping ucasmap;
831
832 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
833 r = -EFAULT;
834 break;
835 }
836
837 if (!kvm_is_ucontrol(vcpu->kvm)) {
838 r = -EINVAL;
839 break;
840 }
841
842 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
843 ucasmap.length);
844 break;
845 }
846#endif
ccc7910f
CO
847 case KVM_S390_VCPU_FAULT: {
848 r = gmap_fault(arg, vcpu->arch.gmap);
849 if (!IS_ERR_VALUE(r))
850 r = 0;
851 break;
852 }
b0c632db 853 default:
3e6afcf1 854 r = -ENOTTY;
b0c632db 855 }
bc923cc9 856 return r;
b0c632db
HC
857}
858
5b1c1493
CO
859int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
860{
861#ifdef CONFIG_KVM_S390_UCONTROL
862 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
863 && (kvm_is_ucontrol(vcpu->kvm))) {
864 vmf->page = virt_to_page(vcpu->arch.sie_block);
865 get_page(vmf->page);
866 return 0;
867 }
868#endif
869 return VM_FAULT_SIGBUS;
870}
871
db3fe4eb
TY
872void kvm_arch_free_memslot(struct kvm_memory_slot *free,
873 struct kvm_memory_slot *dont)
874{
875}
876
877int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
878{
879 return 0;
880}
881
b0c632db 882/* Section: memory related */
f7784b8e
MT
883int kvm_arch_prepare_memory_region(struct kvm *kvm,
884 struct kvm_memory_slot *memslot,
885 struct kvm_memory_slot old,
886 struct kvm_userspace_memory_region *mem,
887 int user_alloc)
b0c632db
HC
888{
889 /* A few sanity checks. We can have exactly one memory slot which has
890 to start at guest virtual zero and which has to be located at a
891 page boundary in userland and which has to end at a page boundary.
892 The memory in userland is ok to be fragmented into various different
893 vmas. It is okay to mmap() and munmap() stuff in this slot after
894 doing this call at any time */
895
628eb9b8 896 if (mem->slot)
b0c632db
HC
897 return -EINVAL;
898
899 if (mem->guest_phys_addr)
900 return -EINVAL;
901
598841ca 902 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
903 return -EINVAL;
904
598841ca 905 if (mem->memory_size & 0xffffful)
b0c632db
HC
906 return -EINVAL;
907
2668dab7
CO
908 if (!user_alloc)
909 return -EINVAL;
910
f7784b8e
MT
911 return 0;
912}
913
914void kvm_arch_commit_memory_region(struct kvm *kvm,
915 struct kvm_userspace_memory_region *mem,
916 struct kvm_memory_slot old,
917 int user_alloc)
918{
f7850c92 919 int rc;
f7784b8e 920
598841ca
CO
921
922 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
923 mem->guest_phys_addr, mem->memory_size);
924 if (rc)
f7850c92 925 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 926 return;
b0c632db
HC
927}
928
34d4cb8f
MT
void kvm_arch_flush_shadow(struct kvm *kvm)
{
	/* nothing to do: s390 has no shadow page tables to flush */
}
932
b0c632db
HC
933static int __init kvm_s390_init(void)
934{
ef50f7ac 935 int ret;
0ee75bea 936 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
ef50f7ac
CB
937 if (ret)
938 return ret;
939
940 /*
941 * guests can ask for up to 255+1 double words, we need a full page
25985edc 942 * to hold the maximum amount of facilities. On the other hand, we
ef50f7ac
CB
943 * only set facilities that are known to work in KVM.
944 */
c2f0e8c8 945 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
ef50f7ac
CB
946 if (!facilities) {
947 kvm_exit();
948 return -ENOMEM;
949 }
14375bc4 950 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
6d00d00b 951 facilities[0] &= 0xff00fff3f47c0000ULL;
9950f8be 952 facilities[1] &= 0x201c000000000000ULL;
ef50f7ac 953 return 0;
b0c632db
HC
954}
955
956static void __exit kvm_s390_exit(void)
957{
ef50f7ac 958 free_page((unsigned long) facilities);
b0c632db
HC
959 kvm_exit();
960}
961
962module_init(kvm_s390_init);
963module_exit(kvm_s390_exit);
This page took 0.371707 seconds and 5 git commands to generate.