KVM: s390: add parameter for KVM_CREATE_VM
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
/* Expand to the offset/type pair kvm_stats_debugfs expects for a vcpu stat. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-vcpu statistics exported through debugfs (one file per entry).
 * The table is NULL-terminated; names are the debugfs file names.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};
79
ef50f7ac 80static unsigned long long *facilities;
b0c632db
HC
81
/* Section: not file related */
/* Called by common KVM when a CPU is brought online; nothing to do on s390. */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
88
/* Counterpart to kvm_arch_hardware_enable(); no per-CPU teardown needed. */
void kvm_arch_hardware_disable(void *garbage)
{
}
92
b0c632db
HC
/* One-time hardware setup at module load; s390 needs none. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}
97
/* Counterpart to kvm_arch_hardware_setup(); nothing to undo. */
void kvm_arch_hardware_unsetup(void)
{
}
101
/* All s390 CPUs that can run this kernel can run SIE guests; no check needed. */
void kvm_arch_check_processor_compat(void *rtn)
{
}
105
/* Architecture hook for kvm_init(); no s390-specific module init required. */
int kvm_arch_init(void *opaque)
{
	return 0;
}
110
/* Counterpart to kvm_arch_init(); nothing to release. */
void kvm_arch_exit(void)
{
}
114
/* Section: device related */
/*
 * ioctl handler for the /dev/kvm device node.
 * Only KVM_S390_ENABLE_SIE is supported: it switches the calling process's
 * address space into a SIE-capable layout via s390_enable_sie().
 */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
123
124int kvm_dev_ioctl_check_extension(long ext)
125{
d7b0b5eb
CO
126 int r;
127
2bd0ac4e 128 switch (ext) {
d7b0b5eb 129 case KVM_CAP_S390_PSW:
b6cf8788 130 case KVM_CAP_S390_GMAP:
52e16b18 131 case KVM_CAP_SYNC_MMU:
d7b0b5eb
CO
132 r = 1;
133 break;
2bd0ac4e 134 default:
d7b0b5eb 135 r = 0;
2bd0ac4e 136 }
d7b0b5eb 137 return r;
b0c632db
HC
138}
139
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390 yet; report success with an
 * unchanged log so callers relying on the ioctl do not fail outright.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
149
/*
 * VM-scope ioctl handler. Only KVM_S390_INTERRUPT (inject a floating
 * interrupt into the VM) is handled here; everything else is rejected
 * with -ENOTTY so common code can distinguish "unknown ioctl".
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
173
/*
 * Architecture-specific VM creation (called from KVM_CREATE_VM).
 *
 * @type carries flags from userspace: KVM_VM_S390_UCONTROL requests a
 * user-controlled VM (no kernel-managed guest address space) and needs
 * CAP_SYS_ADMIN; any other bit is rejected. Without UCONTROL support
 * compiled in, a nonzero type is rejected.
 *
 * Allocates the SCA (system control area shared by all vcpus), a debug
 * feature log, the floating-interrupt list, and — unless this is a
 * ucontrol VM — the gmap guest address space. Error paths unwind in
 * reverse order via gotos.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	/* ucontrol VMs bypass kernel memory management: privileged only */
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	/* one debug log per VM, named after the creating process */
	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* userspace manages the guest address space itself */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
227
d329c035
CB
/*
 * Free one vcpu: clear its bit in the SCA cpu mask, detach its SIE block
 * from the SCA entry (only if still ours), then free the SIE block and
 * the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	/* another vcpu may have reused this SCA slot; only clear our own */
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	/* make the SCA update visible before the page is freed */
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
240
/*
 * Destroy all vcpus of a VM, then clear the vcpu table and online count
 * under kvm->lock so concurrent lookups see a consistent empty state.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
256
ad8ba2cd
SY
/* Called by common code before VM destruction; no pending events on s390. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
260
b0c632db
HC
261void kvm_arch_destroy_vm(struct kvm *kvm)
262{
d329c035 263 kvm_free_vcpus(kvm);
b0c632db 264 free_page((unsigned long)(kvm->arch.sca));
d329c035 265 debug_unregister(kvm->arch.dbf);
598841ca 266 gmap_free(kvm->arch.gmap);
b0c632db
HC
267}
268
/* Section: vcpu related */
/*
 * Early per-vcpu init: inherit the VM-wide guest address space map.
 * NOTE(review): for a ucontrol VM this copies a NULL gmap — presumably
 * ucontrol vcpus get their own gmap elsewhere; confirm before relying on it.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}
275
/* Counterpart to kvm_arch_vcpu_init(); no per-vcpu state to release here. */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
280
/*
 * Scheduled in on a physical CPU: save the host FPU/access registers,
 * install the guest's, attach the guest address space, and mark the
 * vcpu running in its SIE control block.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* strip invalid bits before loading the guest FPC */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
291
/*
 * Scheduled out: mirror of kvm_arch_vcpu_load() in reverse order —
 * clear RUNNING, detach the gmap, stash the guest FPU/access registers
 * and restore the host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
301
/*
 * Reset vcpu state to architected initial-CPU-reset values.
 * Equals the initial cpu reset in POP, but we don't switch to ESA.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;	/* invalidate interception cpu */
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;		/* architected CR0 reset value */
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;	/* architected CR14 reset value */
	vcpu->arch.guest_fpregs.fpc = 0;
	/* also clear the hardware FPC so a following load starts clean */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
319
/*
 * One-time SIE control block setup for a freshly created vcpu:
 * initial cpuflags, execution controls, the facility list shared by
 * all vcpus, and the clock-comparator wakeup timer/tasklet pair.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	/* start in z/Arch mode, secondary space mode, stopped */
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* mark the cpu id as a KVM guest, not real hardware */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
336
/*
 * Allocate and wire up a new vcpu: kvm_vcpu plus its SIE control block
 * (one zeroed page), register it in the VM's SCA, and hook its
 * local-interrupt structure into the VM's floating-interrupt table.
 * Returns the vcpu or ERR_PTR on failure; error paths unwind via gotos.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	/* the SIE block must be a full, zeroed page of its own */
	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	/* claim the SCA slot only if no earlier vcpu with this id holds it */
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	/* publish this vcpu's local-interrupt struct under the float-int lock */
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
389
b0c632db
HC
/*
 * s390 does its own halt handling, so common code must never call this;
 * BUG() if it does.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
396
/* KVM_S390_INITIAL_RESET ioctl: perform the architected initial cpu reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
402
/* KVM_SET_REGS: copy all 16 general purpose registers from userspace. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
408
/* KVM_GET_REGS: copy all 16 general purpose registers to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}
414
/*
 * KVM_SET_SREGS: set access and control registers. The access registers
 * are loaded into hardware immediately so they take effect even if the
 * vcpu is currently loaded on this cpu.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->arch.guest_acrs);
	return 0;
}
423
/* KVM_GET_SREGS: read back access and control registers. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
431
/*
 * KVM_SET_FPU: set floating point registers and FPC; load them into
 * hardware immediately so the change is live for a loaded vcpu.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}
439
/* KVM_GET_FPU: read back floating point registers and FPC. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
446
447static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
448{
449 int rc = 0;
450
9e6dabef 451 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 452 rc = -EBUSY;
d7b0b5eb
CO
453 else {
454 vcpu->run->psw_mask = psw.mask;
455 vcpu->run->psw_addr = psw.addr;
456 }
b0c632db
HC
457 return rc;
458}
459
/* KVM_TRANSLATE: address translation for debuggers; unimplemented on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
465
d0bfb940
JK
/* KVM_SET_GUEST_DEBUG: guest debugging support; unimplemented on s390. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}
471
62d9f0db
MT
/* KVM_GET_MP_STATE: multiprocessing state query; unimplemented on s390. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
477
/* KVM_SET_MP_STATE: multiprocessing state update; unimplemented on s390. */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
483
b0c632db
HC
/*
 * Enter SIE once: sync gprs 14/15 into the SIE block, handle pending
 * reschedule/machine checks, deliver queued interrupts, then run the
 * guest. The irq-disable windows around kvm_guest_enter()/exit() keep
 * the guest-mode accounting consistent. A fault from the SIE
 * instruction itself is reflected to the guest as an addressing
 * exception.
 */
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	/* gg14/gg15 live in the SIE block; copy both (16 bytes) in */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	/* copy gg14/gg15 back out so guest_gprs stays authoritative */
	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
514
/*
 * KVM_RUN: main vcpu execution loop. Loads the guest PSW from the
 * shared kvm_run area, repeatedly enters SIE and handles intercepts
 * until either a signal arrives or an intercept needs userspace, then
 * writes the PSW back and returns. SIE_INTERCEPT_RERUNVCPU restarts
 * the whole sequence (used when vcpu state was reset under us).
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only these exit reasons may legally re-enter KVM_RUN */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* mirror the final guest PSW back for userspace */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
580
092670cd 581static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
582 unsigned long n, int prefix)
583{
584 if (prefix)
585 return copy_to_guest(vcpu, guestdest, from, n);
586 else
587 return copy_to_guest_absolute(vcpu, guestdest, from, n);
588}
589
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;	/* flag byte: z/Architecture mode */
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		/* mark z/Arch mode at absolute byte 163, store at 0x1200 */
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		/* same, but through the prefix area */
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
657
b0c632db
HC
/*
 * vcpu-scope ioctl dispatcher: interrupt injection, store-status,
 * initial PSW, and initial reset. Unknown ioctls return -EINVAL.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* arg is the guest address itself, not a user pointer */
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}
695
/* Section: memory related */
/*
 * Validate a memory-slot change before it is committed.
 */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	/* 0xffffful: userspace address must be 1 MB segment aligned */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	/* size must be a multiple of 1 MB as well */
	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
727
728void kvm_arch_commit_memory_region(struct kvm *kvm,
729 struct kvm_userspace_memory_region *mem,
730 struct kvm_memory_slot old,
731 int user_alloc)
732{
f7850c92 733 int rc;
f7784b8e 734
598841ca
CO
735
736 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
737 mem->guest_phys_addr, mem->memory_size);
738 if (rc)
f7850c92 739 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 740 return;
b0c632db
HC
741}
742
34d4cb8f
MT
/* Shadow page tables are not used on s390; nothing to flush. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
746
b0c632db
HC
/*
 * Module init: register with common KVM, then build the facility list
 * page that every vcpu's SIE block points at (see kvm_arch_vcpu_setup).
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	/* start from the host's facility list (first 16 bytes)... */
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	/* ...then mask down to the facilities KVM supports for guests */
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}
769
/* Module exit: free the facility list page and unregister from KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}
775
776module_init(kvm_s390_init);
777module_exit(kvm_s390_exit);
This page took 0.416448 seconds and 5 git commands to generate.