KVM: s390: Handle sckpf instruction
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
a0616cde 30#include <asm/switch_to.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 38 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0
CB
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
388186bc 75 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 76 { "diagnose_44", VCPU_STAT(diagnose_44) },
41628d33 77 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
b0c632db
HC
78 { NULL }
79};
80
ef50f7ac 81static unsigned long long *facilities;
b0c632db
HC
82
/* Section: not file related */

/*
 * Hardware enable/disable hooks for common KVM code. On s390 the SIE
 * virtualization facility needs no per-cpu arming, so both are no-ops.
 */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

/* No architecture-specific hardware setup is required. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

/* Nothing to verify; every processor reaching this code is compatible. */
void kvm_arch_check_processor_compat(void *rtn)
{
}

/* Arch hooks for kvm_init()/kvm_exit(); no s390-specific work needed. */
int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
115
116/* Section: device related */
117long kvm_arch_dev_ioctl(struct file *filp,
118 unsigned int ioctl, unsigned long arg)
119{
120 if (ioctl == KVM_S390_ENABLE_SIE)
121 return s390_enable_sie();
122 return -EINVAL;
123}
124
125int kvm_dev_ioctl_check_extension(long ext)
126{
d7b0b5eb
CO
127 int r;
128
2bd0ac4e 129 switch (ext) {
d7b0b5eb 130 case KVM_CAP_S390_PSW:
b6cf8788 131 case KVM_CAP_S390_GMAP:
52e16b18 132 case KVM_CAP_SYNC_MMU:
1efd0f59
CO
133#ifdef CONFIG_KVM_S390_UCONTROL
134 case KVM_CAP_S390_UCONTROL:
135#endif
60b413c9 136 case KVM_CAP_SYNC_REGS:
d7b0b5eb
CO
137 r = 1;
138 break;
2bd0ac4e 139 default:
d7b0b5eb 140 r = 0;
2bd0ac4e 141 }
d7b0b5eb 142 return r;
b0c632db
HC
143}
144
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390 yet; report success with an
 * empty log so userspace does not fail.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
154
/*
 * Architecture-specific VM ioctls. Only KVM_S390_INTERRUPT (inject a
 * floating interrupt into the VM) is handled here; anything else is
 * rejected with -ENOTTY so common code/userspace can tell it apart from
 * an argument error.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
178
e08b9637 179int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 180{
b0c632db
HC
181 int rc;
182 char debug_name[16];
183
e08b9637
CO
184 rc = -EINVAL;
185#ifdef CONFIG_KVM_S390_UCONTROL
186 if (type & ~KVM_VM_S390_UCONTROL)
187 goto out_err;
188 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
189 goto out_err;
190#else
191 if (type)
192 goto out_err;
193#endif
194
b0c632db
HC
195 rc = s390_enable_sie();
196 if (rc)
d89f5eff 197 goto out_err;
b0c632db 198
b290411a
CO
199 rc = -ENOMEM;
200
b0c632db
HC
201 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
202 if (!kvm->arch.sca)
d89f5eff 203 goto out_err;
b0c632db
HC
204
205 sprintf(debug_name, "kvm-%u", current->pid);
206
207 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
208 if (!kvm->arch.dbf)
209 goto out_nodbf;
210
ba5c1e9b
CO
211 spin_lock_init(&kvm->arch.float_int.lock);
212 INIT_LIST_HEAD(&kvm->arch.float_int.list);
213
b0c632db
HC
214 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
215 VM_EVENT(kvm, 3, "%s", "vm created");
216
e08b9637
CO
217 if (type & KVM_VM_S390_UCONTROL) {
218 kvm->arch.gmap = NULL;
219 } else {
220 kvm->arch.gmap = gmap_alloc(current->mm);
221 if (!kvm->arch.gmap)
222 goto out_nogmap;
223 }
d89f5eff 224 return 0;
598841ca
CO
225out_nogmap:
226 debug_unregister(kvm->arch.dbf);
b0c632db
HC
227out_nodbf:
228 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
229out_err:
230 return rc;
b0c632db
HC
231}
232
/*
 * Tear down one vcpu: detach its SIE block from the SCA (non-ucontrol
 * VMs only), free the per-vcpu gmap (ucontrol VMs only), then release
 * the SIE block page and the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* clear this cpu's bit in the SCA cpu mask ... */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		/* ... and its SIE block pointer, if still ours */
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
252
/*
 * Destroy all vcpus of a VM and reset the online-vcpu bookkeeping under
 * kvm->lock so the vcpu array and counter stay consistent.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
268
/* Called by common code before VM destruction; nothing to sync on s390. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
272
/*
 * Release the architecture half of a VM, inverse of kvm_arch_init_vm():
 * vcpus first, then the SCA page, the debug feature and (for
 * non-ucontrol VMs) the VM-wide gmap.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
281
/* Section: vcpu related */
/*
 * Per-vcpu init: ucontrol VMs get their own gmap per vcpu; regular VMs
 * share the VM-wide gmap and advertise which registers are mirrored in
 * the kvm_run sync-regs area.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		/* note: ucontrol vcpus do not set kvm_valid_regs */
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}
299
/* Counterpart of kvm_arch_vcpu_init(); nothing to undo here. */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
304
/*
 * Called when a vcpu is scheduled onto a host cpu: save the host FP and
 * access registers, install the guest's, activate the guest address
 * space and mark the vcpu running in its SIE control block.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* strip any invalid bits before loading the guest fpc */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
315
/*
 * Called when a vcpu is scheduled away: mirror image of
 * kvm_arch_vcpu_load() — clear the running flag, detach the guest
 * address space, stash the guest FP/access registers and restore the
 * host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
325
/*
 * Put the vcpu's architectural state into its initial-reset values.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for cr0 and cr14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* also reset the host fpc so the cleared value is in effect now */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
342
/*
 * One-time setup of a freshly created vcpu: initial SIE control block
 * flags, facility list pointer, the clock-comparator timer/tasklet and
 * the reported CPU id (version 0xff marks it as a virtual CPU).
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	/* SIE reads the (31-bit addressable) facility list via this field */
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
359
/*
 * Allocate and wire up a new vcpu: the kvm_vcpu structure, its SIE
 * control block page, the SCA entry (non-ucontrol VMs), and the local
 * interrupt state. Returns the vcpu or an ERR_PTR on failure; error
 * paths unwind via the out_* labels.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	/* the SIE block must live on its own zeroed page */
	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		/* register the SIE block in the SCA slot for this cpu id */
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	/* publish the local interrupt struct under the float_int lock */
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
419
b0c632db
HC
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
433
434
b0c632db
HC
/* ioctl backend for KVM_S390_INITIAL_RESET; always succeeds. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
440
/* Copy the general purpose registers from userspace into the vcpu. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

/* Copy the general purpose registers from the vcpu back to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
452
/*
 * Set access and control registers. The access registers are restored
 * immediately so the new values take effect for the running thread.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

/* Read access and control registers out of the vcpu. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
469
/*
 * Set the guest floating point registers and fpc.
 * NOTE(review): invalid fpc bits are silently masked off rather than
 * rejected — consider returning -EINVAL on an invalid fpc instead;
 * verify against the expected userspace contract.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

/* Read the guest floating point registers and fpc. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
484
/*
 * Install an initial PSW into the kvm_run mirror. Only allowed while
 * the vcpu is stopped; otherwise -EBUSY.
 */
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
497
/* Address translation ioctl: not implemented on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

/* Guest debug support: not implemented on s390. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

/* MP-state get/set: not implemented on s390. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
521
/*
 * Run the guest once via the SIE instruction: sync gprs 14/15 into the
 * SIE block, handle pending host work (resched, machine checks) and
 * pending guest interrupts, enter SIE, and translate a SIE fault into
 * either a ucontrol intercept or an injected addressing exception.
 * Returns 0 or SIE_INTERCEPT_UCONTROL.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	/* gg14/gg15 in the SIE block shadow guest gprs 14 and 15 */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol VMs leave interrupt delivery to userspace */
	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
562
/*
 * Main vcpu run loop (KVM_RUN ioctl): sync state in from kvm_run, loop
 * on __vcpu_run()/intercept handling until a signal arrives or an
 * intercept needs userspace, then translate the internal rc protocol
 * into a kvm_run exit reason and sync state back out.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only re-entry from these exit reasons is valid */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	/* sync dirty register state from userspace into the SIE block */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		/* ucontrol VMs hand every intercept to userspace */
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* sync register state back out to userspace */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
655
092670cd 656static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
657 unsigned long n, int prefix)
658{
659 if (prefix)
660 return copy_to_guest(vcpu, guestdest, from, n);
661 else
662 return copy_to_guest_absolute(vcpu, guestdest, from, n);
663}
664
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * For both special cases an archmode byte of 1 is stored at absolute
 * address 163 first; each register group is then copied to its slot in
 * the architected save area. Returns 0 or -EFAULT.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
732
/*
 * Architecture-specific per-vcpu ioctls: interrupt injection, store
 * status, initial PSW/reset, and (for ucontrol VMs) gmap map/unmap and
 * fault resolution. Unknown requests return -ENOTTY.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* ucas mappings only make sense for ucontrol VMs */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* resolve a guest address fault; success maps to 0 */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
812
/*
 * mmap fault handler for the vcpu fd: for ucontrol VMs, expose the SIE
 * block page at KVM_S390_SIE_PAGE_OFFSET so userspace can map it.
 * Everything else faults with SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
825
/* No per-memslot arch data on s390, so nothing to free or allocate. */
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}
835
/* Section: memory related */
/*
 * Validate a memory region before common code commits it. s390 accepts
 * exactly one user-allocated slot that starts at guest physical zero
 * and is segment (1MB) aligned in both userspace address and size.
 */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	/* 0xffffful masks: addresses and sizes must be 1MB aligned */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
867
/*
 * Commit an already-validated memory region by mapping it into the
 * guest address space. Failure can only be logged here since this hook
 * returns void.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;


	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
882
34d4cb8f
MT
/* No shadow page tables on s390; nothing to flush. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
886
/*
 * Module init: register with common KVM, then build the facility list
 * page advertised to guests (host facilities intersected with the bits
 * KVM knows how to virtualize). The page is GFP_DMA because the SIE
 * block holds only a 31-bit pointer to it.
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}
909
/* Module exit: free the facility list page and unregister from KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
This page took 0.361277 seconds and 5 git commands to generate.