KVM: s390: ucontrol: disable in-kernel irq stack
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 38 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0
CB
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
388186bc 75 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 76 { "diagnose_44", VCPU_STAT(diagnose_44) },
b0c632db
HC
77 { NULL }
78};
79
/* Facility list advertised to guests; one DMA page, set up in kvm_s390_init. */
static unsigned long long *facilities;

/* Section: not file related */

/* Nothing to do: SIE is always available on s390. */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

/* No per-cpu teardown required on s390. */
void kvm_arch_hardware_disable(void *garbage)
{
}

/* No global hardware setup required. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

/* All s390 processors that can run this kernel can run SIE. */
void kvm_arch_check_processor_compat(void *rtn)
{
}

/* No arch-wide initialization needed. */
int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
115/* Section: device related */
116long kvm_arch_dev_ioctl(struct file *filp,
117 unsigned int ioctl, unsigned long arg)
118{
119 if (ioctl == KVM_S390_ENABLE_SIE)
120 return s390_enable_sie();
121 return -EINVAL;
122}
123
124int kvm_dev_ioctl_check_extension(long ext)
125{
d7b0b5eb
CO
126 int r;
127
2bd0ac4e 128 switch (ext) {
d7b0b5eb 129 case KVM_CAP_S390_PSW:
b6cf8788 130 case KVM_CAP_S390_GMAP:
52e16b18 131 case KVM_CAP_SYNC_MMU:
d7b0b5eb
CO
132 r = 1;
133 break;
2bd0ac4e 134 default:
d7b0b5eb 135 r = 0;
2bd0ac4e 136 }
d7b0b5eb 137 return r;
b0c632db
HC
138}
139
/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390; pretend success.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
149
150long kvm_arch_vm_ioctl(struct file *filp,
151 unsigned int ioctl, unsigned long arg)
152{
153 struct kvm *kvm = filp->private_data;
154 void __user *argp = (void __user *)arg;
155 int r;
156
157 switch (ioctl) {
ba5c1e9b
CO
158 case KVM_S390_INTERRUPT: {
159 struct kvm_s390_interrupt s390int;
160
161 r = -EFAULT;
162 if (copy_from_user(&s390int, argp, sizeof(s390int)))
163 break;
164 r = kvm_s390_inject_vm(kvm, &s390int);
165 break;
166 }
b0c632db 167 default:
367e1319 168 r = -ENOTTY;
b0c632db
HC
169 }
170
171 return r;
172}
173
e08b9637 174int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 175{
b0c632db
HC
176 int rc;
177 char debug_name[16];
178
e08b9637
CO
179 rc = -EINVAL;
180#ifdef CONFIG_KVM_S390_UCONTROL
181 if (type & ~KVM_VM_S390_UCONTROL)
182 goto out_err;
183 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
184 goto out_err;
185#else
186 if (type)
187 goto out_err;
188#endif
189
b0c632db
HC
190 rc = s390_enable_sie();
191 if (rc)
d89f5eff 192 goto out_err;
b0c632db 193
b290411a
CO
194 rc = -ENOMEM;
195
b0c632db
HC
196 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
197 if (!kvm->arch.sca)
d89f5eff 198 goto out_err;
b0c632db
HC
199
200 sprintf(debug_name, "kvm-%u", current->pid);
201
202 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
203 if (!kvm->arch.dbf)
204 goto out_nodbf;
205
ba5c1e9b
CO
206 spin_lock_init(&kvm->arch.float_int.lock);
207 INIT_LIST_HEAD(&kvm->arch.float_int.list);
208
b0c632db
HC
209 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
210 VM_EVENT(kvm, 3, "%s", "vm created");
211
e08b9637
CO
212 if (type & KVM_VM_S390_UCONTROL) {
213 kvm->arch.gmap = NULL;
214 } else {
215 kvm->arch.gmap = gmap_alloc(current->mm);
216 if (!kvm->arch.gmap)
217 goto out_nogmap;
218 }
d89f5eff 219 return 0;
598841ca
CO
220out_nogmap:
221 debug_unregister(kvm->arch.dbf);
b0c632db
HC
222out_nodbf:
223 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
224out_err:
225 return rc;
b0c632db
HC
226}
227
d329c035
CB
228void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
229{
230 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
fc34531d 231 clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
abf4a71e
CO
232 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
233 (__u64) vcpu->arch.sie_block)
234 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
235 smp_mb();
27e0393f
CO
236
237 if (kvm_is_ucontrol(vcpu->kvm))
238 gmap_free(vcpu->arch.gmap);
239
d329c035 240 free_page((unsigned long)(vcpu->arch.sie_block));
6692cef3 241 kvm_vcpu_uninit(vcpu);
d329c035
CB
242 kfree(vcpu);
243}
244
245static void kvm_free_vcpus(struct kvm *kvm)
246{
247 unsigned int i;
988a2cae 248 struct kvm_vcpu *vcpu;
d329c035 249
988a2cae
GN
250 kvm_for_each_vcpu(i, vcpu, kvm)
251 kvm_arch_vcpu_destroy(vcpu);
252
253 mutex_lock(&kvm->lock);
254 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
255 kvm->vcpus[i] = NULL;
256
257 atomic_set(&kvm->online_vcpus, 0);
258 mutex_unlock(&kvm->lock);
d329c035
CB
259}
260
ad8ba2cd
SY
261void kvm_arch_sync_events(struct kvm *kvm)
262{
263}
264
b0c632db
HC
265void kvm_arch_destroy_vm(struct kvm *kvm)
266{
d329c035 267 kvm_free_vcpus(kvm);
b0c632db 268 free_page((unsigned long)(kvm->arch.sca));
d329c035 269 debug_unregister(kvm->arch.dbf);
27e0393f
CO
270 if (!kvm_is_ucontrol(kvm))
271 gmap_free(kvm->arch.gmap);
b0c632db
HC
272}
273
274/* Section: vcpu related */
275int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
276{
27e0393f
CO
277 if (kvm_is_ucontrol(vcpu->kvm)) {
278 vcpu->arch.gmap = gmap_alloc(current->mm);
279 if (!vcpu->arch.gmap)
280 return -ENOMEM;
281 return 0;
282 }
283
598841ca 284 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
b0c632db
HC
285 return 0;
286}
287
288void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
289{
6692cef3 290 /* Nothing todo */
b0c632db
HC
291}
292
293void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
294{
295 save_fp_regs(&vcpu->arch.host_fpregs);
296 save_access_regs(vcpu->arch.host_acrs);
297 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
298 restore_fp_regs(&vcpu->arch.guest_fpregs);
299 restore_access_regs(vcpu->arch.guest_acrs);
480e5926 300 gmap_enable(vcpu->arch.gmap);
9e6dabef 301 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
b0c632db
HC
302}
303
304void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
305{
9e6dabef 306 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
480e5926 307 gmap_disable(vcpu->arch.gmap);
b0c632db
HC
308 save_fp_regs(&vcpu->arch.guest_fpregs);
309 save_access_regs(vcpu->arch.guest_acrs);
310 restore_fp_regs(&vcpu->arch.host_fpregs);
311 restore_access_regs(vcpu->arch.host_acrs);
312}
313
314static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
315{
316 /* this equals initial cpu reset in pop, but we don't switch to ESA */
317 vcpu->arch.sie_block->gpsw.mask = 0UL;
318 vcpu->arch.sie_block->gpsw.addr = 0UL;
319 vcpu->arch.sie_block->prefix = 0UL;
320 vcpu->arch.sie_block->ihcpu = 0xffff;
321 vcpu->arch.sie_block->cputm = 0UL;
322 vcpu->arch.sie_block->ckc = 0UL;
323 vcpu->arch.sie_block->todpr = 0;
324 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
325 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
326 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
327 vcpu->arch.guest_fpregs.fpc = 0;
328 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
329 vcpu->arch.sie_block->gbea = 1;
330}
331
332int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
333{
9e6dabef
CH
334 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
335 CPUSTAT_SM |
336 CPUSTAT_STOPPED);
fc34531d 337 vcpu->arch.sie_block->ecb = 6;
b0c632db 338 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 339 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
340 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
341 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
342 (unsigned long) vcpu);
343 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 344 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 345 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
346 return 0;
347}
348
349struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
350 unsigned int id)
351{
4d47555a
CO
352 struct kvm_vcpu *vcpu;
353 int rc = -EINVAL;
354
355 if (id >= KVM_MAX_VCPUS)
356 goto out;
357
358 rc = -ENOMEM;
b0c632db 359
4d47555a 360 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
b0c632db 361 if (!vcpu)
4d47555a 362 goto out;
b0c632db 363
180c12fb
CB
364 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
365 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
366
367 if (!vcpu->arch.sie_block)
368 goto out_free_cpu;
369
370 vcpu->arch.sie_block->icpua = id;
371 BUG_ON(!kvm->arch.sca);
abf4a71e
CO
372 if (!kvm->arch.sca->cpu[id].sda)
373 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
b0c632db
HC
374 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
375 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
fc34531d 376 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
b0c632db 377
ba5c1e9b
CO
378 spin_lock_init(&vcpu->arch.local_int.lock);
379 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
380 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 381 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
382 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
383 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 384 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 385 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 386
b0c632db
HC
387 rc = kvm_vcpu_init(vcpu, kvm, id);
388 if (rc)
7b06bf2f 389 goto out_free_sie_block;
b0c632db
HC
390 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
391 vcpu->arch.sie_block);
392
b0c632db 393 return vcpu;
7b06bf2f
WY
394out_free_sie_block:
395 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db
HC
396out_free_cpu:
397 kfree(vcpu);
4d47555a 398out:
b0c632db
HC
399 return ERR_PTR(rc);
400}
401
/*
 * Unused on s390: generic code references this hook but never calls
 * it here, so any call indicates a bug.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

/* Handle the KVM_S390_INITIAL_RESET ioctl. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
414
415int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
416{
b0c632db 417 memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
418 return 0;
419}
420
421int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
422{
b0c632db 423 memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
b0c632db
HC
424 return 0;
425}
426
427int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
428 struct kvm_sregs *sregs)
429{
b0c632db
HC
430 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
431 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
7eef87dc 432 restore_access_regs(vcpu->arch.guest_acrs);
b0c632db
HC
433 return 0;
434}
435
436int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
437 struct kvm_sregs *sregs)
438{
b0c632db
HC
439 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
440 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
441 return 0;
442}
443
444int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
445{
b0c632db
HC
446 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
447 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
7eef87dc 448 restore_fp_regs(&vcpu->arch.guest_fpregs);
b0c632db
HC
449 return 0;
450}
451
452int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
453{
b0c632db
HC
454 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
455 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
456 return 0;
457}
458
459static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
460{
461 int rc = 0;
462
9e6dabef 463 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 464 rc = -EBUSY;
d7b0b5eb
CO
465 else {
466 vcpu->run->psw_mask = psw.mask;
467 vcpu->run->psw_addr = psw.addr;
468 }
b0c632db
HC
469 return rc;
470}
471
472int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
473 struct kvm_translation *tr)
474{
475 return -EINVAL; /* not implemented yet */
476}
477
d0bfb940
JK
478int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
479 struct kvm_guest_debug *dbg)
b0c632db
HC
480{
481 return -EINVAL; /* not implemented yet */
482}
483
62d9f0db
MT
484int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
485 struct kvm_mp_state *mp_state)
486{
487 return -EINVAL; /* not implemented yet */
488}
489
490int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
491 struct kvm_mp_state *mp_state)
492{
493 return -EINVAL; /* not implemented yet */
494}
495
e168bf8d 496static int __vcpu_run(struct kvm_vcpu *vcpu)
b0c632db 497{
e168bf8d
CO
498 int rc;
499
b0c632db
HC
500 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
501
502 if (need_resched())
503 schedule();
504
71cde587
CB
505 if (test_thread_flag(TIF_MCCK_PENDING))
506 s390_handle_mcck();
507
d6b6d166
CO
508 if (!kvm_is_ucontrol(vcpu->kvm))
509 kvm_s390_deliver_pending_interrupts(vcpu);
0ff31867 510
b0c632db
HC
511 vcpu->arch.sie_block->icptcode = 0;
512 local_irq_disable();
513 kvm_guest_enter();
514 local_irq_enable();
515 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
516 atomic_read(&vcpu->arch.sie_block->cpuflags));
e168bf8d
CO
517 rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
518 if (rc) {
519 if (kvm_is_ucontrol(vcpu->kvm)) {
520 rc = SIE_INTERCEPT_UCONTROL;
521 } else {
522 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
523 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
524 rc = 0;
525 }
1f0d0f09 526 }
b0c632db
HC
527 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
528 vcpu->arch.sie_block->icptcode);
529 local_irq_disable();
530 kvm_guest_exit();
531 local_irq_enable();
532
533 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
e168bf8d 534 return rc;
b0c632db
HC
535}
536
537int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
538{
8f2abe6a 539 int rc;
b0c632db
HC
540 sigset_t sigsaved;
541
9ace903d 542rerun_vcpu:
b0c632db
HC
543 if (vcpu->sigset_active)
544 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
545
9e6dabef 546 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
b0c632db 547
ba5c1e9b
CO
548 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
549
8f2abe6a
CB
550 switch (kvm_run->exit_reason) {
551 case KVM_EXIT_S390_SIEIC:
8f2abe6a 552 case KVM_EXIT_UNKNOWN:
9ace903d 553 case KVM_EXIT_INTR:
8f2abe6a 554 case KVM_EXIT_S390_RESET:
e168bf8d 555 case KVM_EXIT_S390_UCONTROL:
8f2abe6a
CB
556 break;
557 default:
558 BUG();
559 }
560
d7b0b5eb
CO
561 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
562 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
563
dab4079d 564 might_fault();
8f2abe6a
CB
565
566 do {
e168bf8d
CO
567 rc = __vcpu_run(vcpu);
568 if (rc)
569 break;
c0d744a9
CO
570 if (kvm_is_ucontrol(vcpu->kvm))
571 rc = -EOPNOTSUPP;
572 else
573 rc = kvm_handle_sie_intercept(vcpu);
8f2abe6a
CB
574 } while (!signal_pending(current) && !rc);
575
9ace903d
CE
576 if (rc == SIE_INTERCEPT_RERUNVCPU)
577 goto rerun_vcpu;
578
b1d16c49
CE
579 if (signal_pending(current) && !rc) {
580 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 581 rc = -EINTR;
b1d16c49 582 }
8f2abe6a 583
e168bf8d
CO
584#ifdef CONFIG_KVM_S390_UCONTROL
585 if (rc == SIE_INTERCEPT_UCONTROL) {
586 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
587 kvm_run->s390_ucontrol.trans_exc_code =
588 current->thread.gmap_addr;
589 kvm_run->s390_ucontrol.pgm_code = 0x10;
590 rc = 0;
591 }
592#endif
593
b8e660b8 594 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
595 /* intercept cannot be handled in-kernel, prepare kvm-run */
596 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
597 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
598 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
599 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
600 rc = 0;
601 }
602
603 if (rc == -EREMOTE) {
604 /* intercept was handled, but userspace support is needed
605 * kvm_run has been prepared by the handler */
606 rc = 0;
607 }
b0c632db 608
d7b0b5eb
CO
609 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
610 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
611
b0c632db
HC
612 if (vcpu->sigset_active)
613 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
614
b0c632db 615 vcpu->stat.exit_userspace++;
7e8e6ab4 616 return rc;
b0c632db
HC
617}
618
092670cd 619static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
620 unsigned long n, int prefix)
621{
622 if (prefix)
623 return copy_to_guest(vcpu, guestdest, from, n);
624 else
625 return copy_to_guest_absolute(vcpu, guestdest, from, n);
626}
627
628/*
629 * store status at address
630 * we use have two special cases:
631 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
632 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
633 */
971eb77f 634int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
b0c632db 635{
092670cd 636 unsigned char archmode = 1;
b0c632db
HC
637 int prefix;
638
639 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
640 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
641 return -EFAULT;
642 addr = SAVE_AREA_BASE;
643 prefix = 0;
644 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
645 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
646 return -EFAULT;
647 addr = SAVE_AREA_BASE;
648 prefix = 1;
649 } else
650 prefix = 0;
651
f64ca217 652 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
b0c632db
HC
653 vcpu->arch.guest_fpregs.fprs, 128, prefix))
654 return -EFAULT;
655
f64ca217 656 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
b0c632db
HC
657 vcpu->arch.guest_gprs, 128, prefix))
658 return -EFAULT;
659
f64ca217 660 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
b0c632db
HC
661 &vcpu->arch.sie_block->gpsw, 16, prefix))
662 return -EFAULT;
663
f64ca217 664 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
b0c632db
HC
665 &vcpu->arch.sie_block->prefix, 4, prefix))
666 return -EFAULT;
667
668 if (__guestcopy(vcpu,
f64ca217 669 addr + offsetof(struct save_area, fp_ctrl_reg),
b0c632db
HC
670 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
671 return -EFAULT;
672
f64ca217 673 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
b0c632db
HC
674 &vcpu->arch.sie_block->todpr, 4, prefix))
675 return -EFAULT;
676
f64ca217 677 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
b0c632db
HC
678 &vcpu->arch.sie_block->cputm, 8, prefix))
679 return -EFAULT;
680
f64ca217 681 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
b0c632db
HC
682 &vcpu->arch.sie_block->ckc, 8, prefix))
683 return -EFAULT;
684
f64ca217 685 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
b0c632db
HC
686 &vcpu->arch.guest_acrs, 64, prefix))
687 return -EFAULT;
688
689 if (__guestcopy(vcpu,
f64ca217 690 addr + offsetof(struct save_area, ctrl_regs),
b0c632db
HC
691 &vcpu->arch.sie_block->gcr, 128, prefix))
692 return -EFAULT;
693 return 0;
694}
695
b0c632db
HC
696long kvm_arch_vcpu_ioctl(struct file *filp,
697 unsigned int ioctl, unsigned long arg)
698{
699 struct kvm_vcpu *vcpu = filp->private_data;
700 void __user *argp = (void __user *)arg;
bc923cc9 701 long r;
b0c632db 702
93736624
AK
703 switch (ioctl) {
704 case KVM_S390_INTERRUPT: {
ba5c1e9b
CO
705 struct kvm_s390_interrupt s390int;
706
93736624 707 r = -EFAULT;
ba5c1e9b 708 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624
AK
709 break;
710 r = kvm_s390_inject_vcpu(vcpu, &s390int);
711 break;
ba5c1e9b 712 }
b0c632db 713 case KVM_S390_STORE_STATUS:
bc923cc9
AK
714 r = kvm_s390_vcpu_store_status(vcpu, arg);
715 break;
b0c632db
HC
716 case KVM_S390_SET_INITIAL_PSW: {
717 psw_t psw;
718
bc923cc9 719 r = -EFAULT;
b0c632db 720 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
721 break;
722 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
723 break;
b0c632db
HC
724 }
725 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
726 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
727 break;
27e0393f
CO
728#ifdef CONFIG_KVM_S390_UCONTROL
729 case KVM_S390_UCAS_MAP: {
730 struct kvm_s390_ucas_mapping ucasmap;
731
732 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
733 r = -EFAULT;
734 break;
735 }
736
737 if (!kvm_is_ucontrol(vcpu->kvm)) {
738 r = -EINVAL;
739 break;
740 }
741
742 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
743 ucasmap.vcpu_addr, ucasmap.length);
744 break;
745 }
746 case KVM_S390_UCAS_UNMAP: {
747 struct kvm_s390_ucas_mapping ucasmap;
748
749 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
750 r = -EFAULT;
751 break;
752 }
753
754 if (!kvm_is_ucontrol(vcpu->kvm)) {
755 r = -EINVAL;
756 break;
757 }
758
759 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
760 ucasmap.length);
761 break;
762 }
763#endif
b0c632db 764 default:
bc923cc9 765 r = -EINVAL;
b0c632db 766 }
bc923cc9 767 return r;
b0c632db
HC
768}
769
5b1c1493
CO
770int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
771{
772#ifdef CONFIG_KVM_S390_UCONTROL
773 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
774 && (kvm_is_ucontrol(vcpu->kvm))) {
775 vmf->page = virt_to_page(vcpu->arch.sie_block);
776 get_page(vmf->page);
777 return 0;
778 }
779#endif
780 return VM_FAULT_SIGBUS;
781}
782
b0c632db 783/* Section: memory related */
f7784b8e
MT
784int kvm_arch_prepare_memory_region(struct kvm *kvm,
785 struct kvm_memory_slot *memslot,
786 struct kvm_memory_slot old,
787 struct kvm_userspace_memory_region *mem,
788 int user_alloc)
b0c632db
HC
789{
790 /* A few sanity checks. We can have exactly one memory slot which has
791 to start at guest virtual zero and which has to be located at a
792 page boundary in userland and which has to end at a page boundary.
793 The memory in userland is ok to be fragmented into various different
794 vmas. It is okay to mmap() and munmap() stuff in this slot after
795 doing this call at any time */
796
628eb9b8 797 if (mem->slot)
b0c632db
HC
798 return -EINVAL;
799
800 if (mem->guest_phys_addr)
801 return -EINVAL;
802
598841ca 803 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
804 return -EINVAL;
805
598841ca 806 if (mem->memory_size & 0xffffful)
b0c632db
HC
807 return -EINVAL;
808
2668dab7
CO
809 if (!user_alloc)
810 return -EINVAL;
811
f7784b8e
MT
812 return 0;
813}
814
815void kvm_arch_commit_memory_region(struct kvm *kvm,
816 struct kvm_userspace_memory_region *mem,
817 struct kvm_memory_slot old,
818 int user_alloc)
819{
f7850c92 820 int rc;
f7784b8e 821
598841ca
CO
822
823 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
824 mem->guest_phys_addr, mem->memory_size);
825 if (rc)
f7850c92 826 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 827 return;
b0c632db
HC
828}
829
/* No shadow page tables to flush on s390. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
833
b0c632db
HC
834static int __init kvm_s390_init(void)
835{
ef50f7ac 836 int ret;
0ee75bea 837 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
ef50f7ac
CB
838 if (ret)
839 return ret;
840
841 /*
842 * guests can ask for up to 255+1 double words, we need a full page
25985edc 843 * to hold the maximum amount of facilities. On the other hand, we
ef50f7ac
CB
844 * only set facilities that are known to work in KVM.
845 */
c2f0e8c8 846 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
ef50f7ac
CB
847 if (!facilities) {
848 kvm_exit();
849 return -ENOMEM;
850 }
14375bc4 851 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
6d00d00b 852 facilities[0] &= 0xff00fff3f47c0000ULL;
9950f8be 853 facilities[1] &= 0x201c000000000000ULL;
ef50f7ac 854 return 0;
b0c632db
HC
855}
856
857static void __exit kvm_s390_exit(void)
858{
ef50f7ac 859 free_page((unsigned long) facilities);
b0c632db
HC
860 kvm_exit();
861}
862
863module_init(kvm_s390_init);
864module_exit(kvm_s390_exit);
This page took 0.333476 seconds and 5 git commands to generate.