KVM: s390: Fix tprot locking
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
/* Expand to the (offset, type) pair expected by struct kvm_stats_debugfs_item. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 38 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0
CB
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
388186bc 75 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 76 { "diagnose_44", VCPU_STAT(diagnose_44) },
b0c632db
HC
77 { NULL }
78};
79
ef50f7ac 80static unsigned long long *facilities;
b0c632db
HC
81
82/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
88
/* nothing to tear down: SIE needs no per-cpu disable on s390 */
void kvm_arch_hardware_disable(void *garbage)
{
}
92
b0c632db
HC
/* no global hardware setup required */
int kvm_arch_hardware_setup(void)
{
	return 0;
}
97
/* counterpart of kvm_arch_hardware_setup(): nothing to undo */
void kvm_arch_hardware_unsetup(void)
{
}
101
/* all s390 processors that can run this kernel can run KVM */
void kvm_arch_check_processor_compat(void *rtn)
{
}
105
/* no architecture-specific module-load work */
int kvm_arch_init(void *opaque)
{
	return 0;
}
110
/* counterpart of kvm_arch_init(): nothing to release */
void kvm_arch_exit(void)
{
}
114
115/* Section: device related */
116long kvm_arch_dev_ioctl(struct file *filp,
117 unsigned int ioctl, unsigned long arg)
118{
119 if (ioctl == KVM_S390_ENABLE_SIE)
120 return s390_enable_sie();
121 return -EINVAL;
122}
123
124int kvm_dev_ioctl_check_extension(long ext)
125{
d7b0b5eb
CO
126 int r;
127
2bd0ac4e 128 switch (ext) {
d7b0b5eb 129 case KVM_CAP_S390_PSW:
b6cf8788 130 case KVM_CAP_S390_GMAP:
d7b0b5eb
CO
131 r = 1;
132 break;
2bd0ac4e 133 default:
d7b0b5eb 134 r = 0;
2bd0ac4e 135 }
d7b0b5eb 136 return r;
b0c632db
HC
137}
138
139/* Section: vm related */
140/*
141 * Get (and clear) the dirty memory log for a memory slot.
142 */
/* dirty logging is not implemented on s390; report success with no data */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
148
149long kvm_arch_vm_ioctl(struct file *filp,
150 unsigned int ioctl, unsigned long arg)
151{
152 struct kvm *kvm = filp->private_data;
153 void __user *argp = (void __user *)arg;
154 int r;
155
156 switch (ioctl) {
ba5c1e9b
CO
157 case KVM_S390_INTERRUPT: {
158 struct kvm_s390_interrupt s390int;
159
160 r = -EFAULT;
161 if (copy_from_user(&s390int, argp, sizeof(s390int)))
162 break;
163 r = kvm_s390_inject_vm(kvm, &s390int);
164 break;
165 }
b0c632db 166 default:
367e1319 167 r = -ENOTTY;
b0c632db
HC
168 }
169
170 return r;
171}
172
d89f5eff 173int kvm_arch_init_vm(struct kvm *kvm)
b0c632db 174{
b0c632db
HC
175 int rc;
176 char debug_name[16];
177
178 rc = s390_enable_sie();
179 if (rc)
d89f5eff 180 goto out_err;
b0c632db 181
b290411a
CO
182 rc = -ENOMEM;
183
b0c632db
HC
184 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
185 if (!kvm->arch.sca)
d89f5eff 186 goto out_err;
b0c632db
HC
187
188 sprintf(debug_name, "kvm-%u", current->pid);
189
190 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
191 if (!kvm->arch.dbf)
192 goto out_nodbf;
193
ba5c1e9b
CO
194 spin_lock_init(&kvm->arch.float_int.lock);
195 INIT_LIST_HEAD(&kvm->arch.float_int.list);
196
b0c632db
HC
197 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
198 VM_EVENT(kvm, 3, "%s", "vm created");
199
598841ca
CO
200 kvm->arch.gmap = gmap_alloc(current->mm);
201 if (!kvm->arch.gmap)
202 goto out_nogmap;
203
d89f5eff 204 return 0;
598841ca
CO
205out_nogmap:
206 debug_unregister(kvm->arch.dbf);
b0c632db
HC
207out_nodbf:
208 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
209out_err:
210 return rc;
b0c632db
HC
211}
212
d329c035
CB
213void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
214{
215 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
fc34531d 216 clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
abf4a71e
CO
217 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
218 (__u64) vcpu->arch.sie_block)
219 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
220 smp_mb();
d329c035 221 free_page((unsigned long)(vcpu->arch.sie_block));
6692cef3 222 kvm_vcpu_uninit(vcpu);
d329c035
CB
223 kfree(vcpu);
224}
225
226static void kvm_free_vcpus(struct kvm *kvm)
227{
228 unsigned int i;
988a2cae 229 struct kvm_vcpu *vcpu;
d329c035 230
988a2cae
GN
231 kvm_for_each_vcpu(i, vcpu, kvm)
232 kvm_arch_vcpu_destroy(vcpu);
233
234 mutex_lock(&kvm->lock);
235 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
236 kvm->vcpus[i] = NULL;
237
238 atomic_set(&kvm->online_vcpus, 0);
239 mutex_unlock(&kvm->lock);
d329c035
CB
240}
241
ad8ba2cd
SY
/* no pending arch events to flush before VM destruction */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
245
b0c632db
HC
246void kvm_arch_destroy_vm(struct kvm *kvm)
247{
d329c035 248 kvm_free_vcpus(kvm);
b0c632db 249 free_page((unsigned long)(kvm->arch.sca));
d329c035 250 debug_unregister(kvm->arch.dbf);
598841ca 251 gmap_free(kvm->arch.gmap);
b0c632db
HC
252}
253
254/* Section: vcpu related */
255int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
256{
598841ca 257 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
b0c632db
HC
258 return 0;
259}
260
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
265
266void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
267{
268 save_fp_regs(&vcpu->arch.host_fpregs);
269 save_access_regs(vcpu->arch.host_acrs);
270 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
271 restore_fp_regs(&vcpu->arch.guest_fpregs);
272 restore_access_regs(vcpu->arch.guest_acrs);
480e5926 273 gmap_enable(vcpu->arch.gmap);
9e6dabef 274 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
b0c632db
HC
275}
276
277void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
278{
9e6dabef 279 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
480e5926 280 gmap_disable(vcpu->arch.gmap);
b0c632db
HC
281 save_fp_regs(&vcpu->arch.guest_fpregs);
282 save_access_regs(vcpu->arch.guest_acrs);
283 restore_fp_regs(&vcpu->arch.host_fpregs);
284 restore_access_regs(vcpu->arch.host_acrs);
285}
286
287static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
288{
289 /* this equals initial cpu reset in pop, but we don't switch to ESA */
290 vcpu->arch.sie_block->gpsw.mask = 0UL;
291 vcpu->arch.sie_block->gpsw.addr = 0UL;
292 vcpu->arch.sie_block->prefix = 0UL;
293 vcpu->arch.sie_block->ihcpu = 0xffff;
294 vcpu->arch.sie_block->cputm = 0UL;
295 vcpu->arch.sie_block->ckc = 0UL;
296 vcpu->arch.sie_block->todpr = 0;
297 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
298 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
299 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
300 vcpu->arch.guest_fpregs.fpc = 0;
301 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
302 vcpu->arch.sie_block->gbea = 1;
303}
304
305int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
306{
9e6dabef
CH
307 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
308 CPUSTAT_SM |
309 CPUSTAT_STOPPED);
fc34531d 310 vcpu->arch.sie_block->ecb = 6;
b0c632db 311 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 312 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
313 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
314 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
315 (unsigned long) vcpu);
316 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 317 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 318 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
319 return 0;
320}
321
322struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
323 unsigned int id)
324{
4d47555a
CO
325 struct kvm_vcpu *vcpu;
326 int rc = -EINVAL;
327
328 if (id >= KVM_MAX_VCPUS)
329 goto out;
330
331 rc = -ENOMEM;
b0c632db 332
4d47555a 333 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
b0c632db 334 if (!vcpu)
4d47555a 335 goto out;
b0c632db 336
180c12fb
CB
337 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
338 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
339
340 if (!vcpu->arch.sie_block)
341 goto out_free_cpu;
342
343 vcpu->arch.sie_block->icpua = id;
344 BUG_ON(!kvm->arch.sca);
abf4a71e
CO
345 if (!kvm->arch.sca->cpu[id].sda)
346 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
b0c632db
HC
347 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
348 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
fc34531d 349 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
b0c632db 350
ba5c1e9b
CO
351 spin_lock_init(&vcpu->arch.local_int.lock);
352 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
353 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 354 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
355 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
356 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 357 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 358 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 359
b0c632db
HC
360 rc = kvm_vcpu_init(vcpu, kvm, id);
361 if (rc)
7b06bf2f 362 goto out_free_sie_block;
b0c632db
HC
363 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
364 vcpu->arch.sie_block);
365
b0c632db 366 return vcpu;
7b06bf2f
WY
367out_free_sie_block:
368 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db
HC
369out_free_cpu:
370 kfree(vcpu);
4d47555a 371out:
b0c632db
HC
372 return ERR_PTR(rc);
373}
374
b0c632db
HC
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
381
/* KVM_S390_INITIAL_RESET ioctl backend */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
387
388int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
389{
b0c632db 390 memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
391 return 0;
392}
393
394int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
395{
b0c632db 396 memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
b0c632db
HC
397 return 0;
398}
399
400int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
401 struct kvm_sregs *sregs)
402{
b0c632db
HC
403 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
404 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
7eef87dc 405 restore_access_regs(vcpu->arch.guest_acrs);
b0c632db
HC
406 return 0;
407}
408
409int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
410 struct kvm_sregs *sregs)
411{
b0c632db
HC
412 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
413 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
414 return 0;
415}
416
417int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
418{
b0c632db
HC
419 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
420 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
7eef87dc 421 restore_fp_regs(&vcpu->arch.guest_fpregs);
b0c632db
HC
422 return 0;
423}
424
425int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
426{
b0c632db
HC
427 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
428 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
429 return 0;
430}
431
432static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
433{
434 int rc = 0;
435
9e6dabef 436 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 437 rc = -EBUSY;
d7b0b5eb
CO
438 else {
439 vcpu->run->psw_mask = psw.mask;
440 vcpu->run->psw_addr = psw.addr;
441 }
b0c632db
HC
442 return rc;
443}
444
445int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
446 struct kvm_translation *tr)
447{
448 return -EINVAL; /* not implemented yet */
449}
450
d0bfb940
JK
451int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
452 struct kvm_guest_debug *dbg)
b0c632db
HC
453{
454 return -EINVAL; /* not implemented yet */
455}
456
62d9f0db
MT
457int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
458 struct kvm_mp_state *mp_state)
459{
460 return -EINVAL; /* not implemented yet */
461}
462
463int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
464 struct kvm_mp_state *mp_state)
465{
466 return -EINVAL; /* not implemented yet */
467}
468
b0c632db
HC
469static void __vcpu_run(struct kvm_vcpu *vcpu)
470{
471 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
472
473 if (need_resched())
474 schedule();
475
71cde587
CB
476 if (test_thread_flag(TIF_MCCK_PENDING))
477 s390_handle_mcck();
478
0ff31867
CO
479 kvm_s390_deliver_pending_interrupts(vcpu);
480
b0c632db
HC
481 vcpu->arch.sie_block->icptcode = 0;
482 local_irq_disable();
483 kvm_guest_enter();
484 local_irq_enable();
485 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
486 atomic_read(&vcpu->arch.sie_block->cpuflags));
1f0d0f09
CO
487 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
488 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
489 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
490 }
b0c632db
HC
491 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
492 vcpu->arch.sie_block->icptcode);
493 local_irq_disable();
494 kvm_guest_exit();
495 local_irq_enable();
496
497 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
498}
499
500int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
501{
8f2abe6a 502 int rc;
b0c632db
HC
503 sigset_t sigsaved;
504
9ace903d 505rerun_vcpu:
b0c632db
HC
506 if (vcpu->sigset_active)
507 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
508
9e6dabef 509 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
b0c632db 510
ba5c1e9b
CO
511 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
512
8f2abe6a
CB
513 switch (kvm_run->exit_reason) {
514 case KVM_EXIT_S390_SIEIC:
8f2abe6a 515 case KVM_EXIT_UNKNOWN:
9ace903d 516 case KVM_EXIT_INTR:
8f2abe6a
CB
517 case KVM_EXIT_S390_RESET:
518 break;
519 default:
520 BUG();
521 }
522
d7b0b5eb
CO
523 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
524 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
525
dab4079d 526 might_fault();
8f2abe6a
CB
527
528 do {
529 __vcpu_run(vcpu);
8f2abe6a
CB
530 rc = kvm_handle_sie_intercept(vcpu);
531 } while (!signal_pending(current) && !rc);
532
9ace903d
CE
533 if (rc == SIE_INTERCEPT_RERUNVCPU)
534 goto rerun_vcpu;
535
b1d16c49
CE
536 if (signal_pending(current) && !rc) {
537 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 538 rc = -EINTR;
b1d16c49 539 }
8f2abe6a 540
b8e660b8 541 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
542 /* intercept cannot be handled in-kernel, prepare kvm-run */
543 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
544 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
545 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
546 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
547 rc = 0;
548 }
549
550 if (rc == -EREMOTE) {
551 /* intercept was handled, but userspace support is needed
552 * kvm_run has been prepared by the handler */
553 rc = 0;
554 }
b0c632db 555
d7b0b5eb
CO
556 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
557 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
558
b0c632db
HC
559 if (vcpu->sigset_active)
560 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
561
b0c632db 562 vcpu->stat.exit_userspace++;
7e8e6ab4 563 return rc;
b0c632db
HC
564}
565
092670cd 566static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
567 unsigned long n, int prefix)
568{
569 if (prefix)
570 return copy_to_guest(vcpu, guestdest, from, n);
571 else
572 return copy_to_guest_absolute(vcpu, guestdest, from, n);
573}
574
575/*
576 * store status at address
577 * we use have two special cases:
578 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
579 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
580 */
971eb77f 581int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
b0c632db 582{
092670cd 583 unsigned char archmode = 1;
b0c632db
HC
584 int prefix;
585
586 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
587 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
588 return -EFAULT;
589 addr = SAVE_AREA_BASE;
590 prefix = 0;
591 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
592 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
593 return -EFAULT;
594 addr = SAVE_AREA_BASE;
595 prefix = 1;
596 } else
597 prefix = 0;
598
f64ca217 599 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
b0c632db
HC
600 vcpu->arch.guest_fpregs.fprs, 128, prefix))
601 return -EFAULT;
602
f64ca217 603 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
b0c632db
HC
604 vcpu->arch.guest_gprs, 128, prefix))
605 return -EFAULT;
606
f64ca217 607 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
b0c632db
HC
608 &vcpu->arch.sie_block->gpsw, 16, prefix))
609 return -EFAULT;
610
f64ca217 611 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
b0c632db
HC
612 &vcpu->arch.sie_block->prefix, 4, prefix))
613 return -EFAULT;
614
615 if (__guestcopy(vcpu,
f64ca217 616 addr + offsetof(struct save_area, fp_ctrl_reg),
b0c632db
HC
617 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
618 return -EFAULT;
619
f64ca217 620 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
b0c632db
HC
621 &vcpu->arch.sie_block->todpr, 4, prefix))
622 return -EFAULT;
623
f64ca217 624 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
b0c632db
HC
625 &vcpu->arch.sie_block->cputm, 8, prefix))
626 return -EFAULT;
627
f64ca217 628 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
b0c632db
HC
629 &vcpu->arch.sie_block->ckc, 8, prefix))
630 return -EFAULT;
631
f64ca217 632 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
b0c632db
HC
633 &vcpu->arch.guest_acrs, 64, prefix))
634 return -EFAULT;
635
636 if (__guestcopy(vcpu,
f64ca217 637 addr + offsetof(struct save_area, ctrl_regs),
b0c632db
HC
638 &vcpu->arch.sie_block->gcr, 128, prefix))
639 return -EFAULT;
640 return 0;
641}
642
b0c632db
HC
643long kvm_arch_vcpu_ioctl(struct file *filp,
644 unsigned int ioctl, unsigned long arg)
645{
646 struct kvm_vcpu *vcpu = filp->private_data;
647 void __user *argp = (void __user *)arg;
bc923cc9 648 long r;
b0c632db 649
93736624
AK
650 switch (ioctl) {
651 case KVM_S390_INTERRUPT: {
ba5c1e9b
CO
652 struct kvm_s390_interrupt s390int;
653
93736624 654 r = -EFAULT;
ba5c1e9b 655 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624
AK
656 break;
657 r = kvm_s390_inject_vcpu(vcpu, &s390int);
658 break;
ba5c1e9b 659 }
b0c632db 660 case KVM_S390_STORE_STATUS:
bc923cc9
AK
661 r = kvm_s390_vcpu_store_status(vcpu, arg);
662 break;
b0c632db
HC
663 case KVM_S390_SET_INITIAL_PSW: {
664 psw_t psw;
665
bc923cc9 666 r = -EFAULT;
b0c632db 667 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
668 break;
669 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
670 break;
b0c632db
HC
671 }
672 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
673 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
674 break;
b0c632db 675 default:
bc923cc9 676 r = -EINVAL;
b0c632db 677 }
bc923cc9 678 return r;
b0c632db
HC
679}
680
681/* Section: memory related */
f7784b8e
MT
682int kvm_arch_prepare_memory_region(struct kvm *kvm,
683 struct kvm_memory_slot *memslot,
684 struct kvm_memory_slot old,
685 struct kvm_userspace_memory_region *mem,
686 int user_alloc)
b0c632db
HC
687{
688 /* A few sanity checks. We can have exactly one memory slot which has
689 to start at guest virtual zero and which has to be located at a
690 page boundary in userland and which has to end at a page boundary.
691 The memory in userland is ok to be fragmented into various different
692 vmas. It is okay to mmap() and munmap() stuff in this slot after
693 doing this call at any time */
694
628eb9b8 695 if (mem->slot)
b0c632db
HC
696 return -EINVAL;
697
698 if (mem->guest_phys_addr)
699 return -EINVAL;
700
598841ca 701 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
702 return -EINVAL;
703
598841ca 704 if (mem->memory_size & 0xffffful)
b0c632db
HC
705 return -EINVAL;
706
2668dab7
CO
707 if (!user_alloc)
708 return -EINVAL;
709
f7784b8e
MT
710 return 0;
711}
712
713void kvm_arch_commit_memory_region(struct kvm *kvm,
714 struct kvm_userspace_memory_region *mem,
715 struct kvm_memory_slot old,
716 int user_alloc)
717{
f7850c92 718 int rc;
f7784b8e 719
598841ca
CO
720
721 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
722 mem->guest_phys_addr, mem->memory_size);
723 if (rc)
f7850c92 724 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 725 return;
b0c632db
HC
726}
727
34d4cb8f
MT
/* no shadow page tables on s390 */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
731
b0c632db
HC
732static int __init kvm_s390_init(void)
733{
ef50f7ac 734 int ret;
0ee75bea 735 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
ef50f7ac
CB
736 if (ret)
737 return ret;
738
739 /*
740 * guests can ask for up to 255+1 double words, we need a full page
25985edc 741 * to hold the maximum amount of facilities. On the other hand, we
ef50f7ac
CB
742 * only set facilities that are known to work in KVM.
743 */
c2f0e8c8 744 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
ef50f7ac
CB
745 if (!facilities) {
746 kvm_exit();
747 return -ENOMEM;
748 }
14375bc4 749 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
6d00d00b 750 facilities[0] &= 0xff00fff3f47c0000ULL;
9950f8be 751 facilities[1] &= 0x201c000000000000ULL;
ef50f7ac 752 return 0;
b0c632db
HC
753}
754
755static void __exit kvm_s390_exit(void)
756{
ef50f7ac 757 free_page((unsigned long) facilities);
b0c632db
HC
758 kvm_exit();
759}
760
761module_init(kvm_s390_init);
762module_exit(kvm_s390_exit);
This page took 0.317058 seconds and 5 git commands to generate.