KVM: s390: ucontrol: per vcpu address spaces
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 38 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0
CB
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
388186bc 75 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 76 { "diagnose_44", VCPU_STAT(diagnose_44) },
b0c632db
HC
77 { NULL }
78};
79
ef50f7ac 80static unsigned long long *facilities;
b0c632db
HC
81
/* Section: not file related */

/*
 * No hardware enablement is needed: every s390 CPU that can run Linux
 * can also run SIE guests ("every s390 is virtualization enabled").
 */
int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
	/* nothing to tear down */
}

int kvm_arch_hardware_setup(void)
{
	/* no per-host setup required */
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	/* nothing to undo */
}

void kvm_arch_check_processor_compat(void *rtn)
{
	/* all s390 processors are compatible */
}

int kvm_arch_init(void *opaque)
{
	/* no module-wide arch state to initialize here */
	return 0;
}

void kvm_arch_exit(void)
{
	/* nothing allocated in kvm_arch_init() */
}
114
115/* Section: device related */
116long kvm_arch_dev_ioctl(struct file *filp,
117 unsigned int ioctl, unsigned long arg)
118{
119 if (ioctl == KVM_S390_ENABLE_SIE)
120 return s390_enable_sie();
121 return -EINVAL;
122}
123
124int kvm_dev_ioctl_check_extension(long ext)
125{
d7b0b5eb
CO
126 int r;
127
2bd0ac4e 128 switch (ext) {
d7b0b5eb 129 case KVM_CAP_S390_PSW:
b6cf8788 130 case KVM_CAP_S390_GMAP:
52e16b18 131 case KVM_CAP_SYNC_MMU:
d7b0b5eb
CO
132 r = 1;
133 break;
2bd0ac4e 134 default:
d7b0b5eb 135 r = 0;
2bd0ac4e 136 }
d7b0b5eb 137 return r;
b0c632db
HC
138}
139
140/* Section: vm related */
141/*
142 * Get (and clear) the dirty memory log for a memory slot.
143 */
144int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
145 struct kvm_dirty_log *log)
146{
147 return 0;
148}
149
150long kvm_arch_vm_ioctl(struct file *filp,
151 unsigned int ioctl, unsigned long arg)
152{
153 struct kvm *kvm = filp->private_data;
154 void __user *argp = (void __user *)arg;
155 int r;
156
157 switch (ioctl) {
ba5c1e9b
CO
158 case KVM_S390_INTERRUPT: {
159 struct kvm_s390_interrupt s390int;
160
161 r = -EFAULT;
162 if (copy_from_user(&s390int, argp, sizeof(s390int)))
163 break;
164 r = kvm_s390_inject_vm(kvm, &s390int);
165 break;
166 }
b0c632db 167 default:
367e1319 168 r = -ENOTTY;
b0c632db
HC
169 }
170
171 return r;
172}
173
e08b9637 174int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 175{
b0c632db
HC
176 int rc;
177 char debug_name[16];
178
e08b9637
CO
179 rc = -EINVAL;
180#ifdef CONFIG_KVM_S390_UCONTROL
181 if (type & ~KVM_VM_S390_UCONTROL)
182 goto out_err;
183 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
184 goto out_err;
185#else
186 if (type)
187 goto out_err;
188#endif
189
b0c632db
HC
190 rc = s390_enable_sie();
191 if (rc)
d89f5eff 192 goto out_err;
b0c632db 193
b290411a
CO
194 rc = -ENOMEM;
195
b0c632db
HC
196 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
197 if (!kvm->arch.sca)
d89f5eff 198 goto out_err;
b0c632db
HC
199
200 sprintf(debug_name, "kvm-%u", current->pid);
201
202 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
203 if (!kvm->arch.dbf)
204 goto out_nodbf;
205
ba5c1e9b
CO
206 spin_lock_init(&kvm->arch.float_int.lock);
207 INIT_LIST_HEAD(&kvm->arch.float_int.list);
208
b0c632db
HC
209 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
210 VM_EVENT(kvm, 3, "%s", "vm created");
211
e08b9637
CO
212 if (type & KVM_VM_S390_UCONTROL) {
213 kvm->arch.gmap = NULL;
214 } else {
215 kvm->arch.gmap = gmap_alloc(current->mm);
216 if (!kvm->arch.gmap)
217 goto out_nogmap;
218 }
d89f5eff 219 return 0;
598841ca
CO
220out_nogmap:
221 debug_unregister(kvm->arch.dbf);
b0c632db
HC
222out_nodbf:
223 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
224out_err:
225 return rc;
b0c632db
HC
226}
227
d329c035
CB
228void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
229{
230 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
fc34531d 231 clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
abf4a71e
CO
232 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
233 (__u64) vcpu->arch.sie_block)
234 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
235 smp_mb();
27e0393f
CO
236
237 if (kvm_is_ucontrol(vcpu->kvm))
238 gmap_free(vcpu->arch.gmap);
239
d329c035 240 free_page((unsigned long)(vcpu->arch.sie_block));
6692cef3 241 kvm_vcpu_uninit(vcpu);
d329c035
CB
242 kfree(vcpu);
243}
244
245static void kvm_free_vcpus(struct kvm *kvm)
246{
247 unsigned int i;
988a2cae 248 struct kvm_vcpu *vcpu;
d329c035 249
988a2cae
GN
250 kvm_for_each_vcpu(i, vcpu, kvm)
251 kvm_arch_vcpu_destroy(vcpu);
252
253 mutex_lock(&kvm->lock);
254 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
255 kvm->vcpus[i] = NULL;
256
257 atomic_set(&kvm->online_vcpus, 0);
258 mutex_unlock(&kvm->lock);
d329c035
CB
259}
260
ad8ba2cd
SY
261void kvm_arch_sync_events(struct kvm *kvm)
262{
263}
264
b0c632db
HC
265void kvm_arch_destroy_vm(struct kvm *kvm)
266{
d329c035 267 kvm_free_vcpus(kvm);
b0c632db 268 free_page((unsigned long)(kvm->arch.sca));
d329c035 269 debug_unregister(kvm->arch.dbf);
27e0393f
CO
270 if (!kvm_is_ucontrol(kvm))
271 gmap_free(kvm->arch.gmap);
b0c632db
HC
272}
273
274/* Section: vcpu related */
275int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
276{
27e0393f
CO
277 if (kvm_is_ucontrol(vcpu->kvm)) {
278 vcpu->arch.gmap = gmap_alloc(current->mm);
279 if (!vcpu->arch.gmap)
280 return -ENOMEM;
281 return 0;
282 }
283
598841ca 284 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
b0c632db
HC
285 return 0;
286}
287
288void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
289{
6692cef3 290 /* Nothing todo */
b0c632db
HC
291}
292
293void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
294{
295 save_fp_regs(&vcpu->arch.host_fpregs);
296 save_access_regs(vcpu->arch.host_acrs);
297 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
298 restore_fp_regs(&vcpu->arch.guest_fpregs);
299 restore_access_regs(vcpu->arch.guest_acrs);
480e5926 300 gmap_enable(vcpu->arch.gmap);
9e6dabef 301 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
b0c632db
HC
302}
303
304void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
305{
9e6dabef 306 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
480e5926 307 gmap_disable(vcpu->arch.gmap);
b0c632db
HC
308 save_fp_regs(&vcpu->arch.guest_fpregs);
309 save_access_regs(vcpu->arch.guest_acrs);
310 restore_fp_regs(&vcpu->arch.host_fpregs);
311 restore_access_regs(vcpu->arch.host_acrs);
312}
313
314static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
315{
316 /* this equals initial cpu reset in pop, but we don't switch to ESA */
317 vcpu->arch.sie_block->gpsw.mask = 0UL;
318 vcpu->arch.sie_block->gpsw.addr = 0UL;
319 vcpu->arch.sie_block->prefix = 0UL;
320 vcpu->arch.sie_block->ihcpu = 0xffff;
321 vcpu->arch.sie_block->cputm = 0UL;
322 vcpu->arch.sie_block->ckc = 0UL;
323 vcpu->arch.sie_block->todpr = 0;
324 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
325 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
326 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
327 vcpu->arch.guest_fpregs.fpc = 0;
328 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
329 vcpu->arch.sie_block->gbea = 1;
330}
331
332int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
333{
9e6dabef
CH
334 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
335 CPUSTAT_SM |
336 CPUSTAT_STOPPED);
fc34531d 337 vcpu->arch.sie_block->ecb = 6;
b0c632db 338 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 339 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
340 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
341 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
342 (unsigned long) vcpu);
343 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 344 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 345 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
346 return 0;
347}
348
349struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
350 unsigned int id)
351{
4d47555a
CO
352 struct kvm_vcpu *vcpu;
353 int rc = -EINVAL;
354
355 if (id >= KVM_MAX_VCPUS)
356 goto out;
357
358 rc = -ENOMEM;
b0c632db 359
4d47555a 360 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
b0c632db 361 if (!vcpu)
4d47555a 362 goto out;
b0c632db 363
180c12fb
CB
364 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
365 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
366
367 if (!vcpu->arch.sie_block)
368 goto out_free_cpu;
369
370 vcpu->arch.sie_block->icpua = id;
371 BUG_ON(!kvm->arch.sca);
abf4a71e
CO
372 if (!kvm->arch.sca->cpu[id].sda)
373 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
b0c632db
HC
374 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
375 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
fc34531d 376 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
b0c632db 377
ba5c1e9b
CO
378 spin_lock_init(&vcpu->arch.local_int.lock);
379 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
380 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 381 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
382 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
383 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 384 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 385 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 386
b0c632db
HC
387 rc = kvm_vcpu_init(vcpu, kvm, id);
388 if (rc)
7b06bf2f 389 goto out_free_sie_block;
b0c632db
HC
390 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
391 vcpu->arch.sie_block);
392
b0c632db 393 return vcpu;
7b06bf2f
WY
394out_free_sie_block:
395 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db
HC
396out_free_cpu:
397 kfree(vcpu);
4d47555a 398out:
b0c632db
HC
399 return ERR_PTR(rc);
400}
401
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

/* KVM_S390_INITIAL_RESET ioctl: perform the architected initial reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
414
415int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
416{
b0c632db 417 memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
418 return 0;
419}
420
421int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
422{
b0c632db 423 memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
b0c632db
HC
424 return 0;
425}
426
427int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
428 struct kvm_sregs *sregs)
429{
b0c632db
HC
430 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
431 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
7eef87dc 432 restore_access_regs(vcpu->arch.guest_acrs);
b0c632db
HC
433 return 0;
434}
435
436int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
437 struct kvm_sregs *sregs)
438{
b0c632db
HC
439 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
440 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
441 return 0;
442}
443
444int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
445{
b0c632db
HC
446 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
447 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
7eef87dc 448 restore_fp_regs(&vcpu->arch.guest_fpregs);
b0c632db
HC
449 return 0;
450}
451
452int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
453{
b0c632db
HC
454 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
455 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
456 return 0;
457}
458
459static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
460{
461 int rc = 0;
462
9e6dabef 463 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 464 rc = -EBUSY;
d7b0b5eb
CO
465 else {
466 vcpu->run->psw_mask = psw.mask;
467 vcpu->run->psw_addr = psw.addr;
468 }
b0c632db
HC
469 return rc;
470}
471
472int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
473 struct kvm_translation *tr)
474{
475 return -EINVAL; /* not implemented yet */
476}
477
d0bfb940
JK
478int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
479 struct kvm_guest_debug *dbg)
b0c632db
HC
480{
481 return -EINVAL; /* not implemented yet */
482}
483
62d9f0db
MT
484int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
485 struct kvm_mp_state *mp_state)
486{
487 return -EINVAL; /* not implemented yet */
488}
489
490int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
491 struct kvm_mp_state *mp_state)
492{
493 return -EINVAL; /* not implemented yet */
494}
495
b0c632db
HC
496static void __vcpu_run(struct kvm_vcpu *vcpu)
497{
498 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
499
500 if (need_resched())
501 schedule();
502
71cde587
CB
503 if (test_thread_flag(TIF_MCCK_PENDING))
504 s390_handle_mcck();
505
0ff31867
CO
506 kvm_s390_deliver_pending_interrupts(vcpu);
507
b0c632db
HC
508 vcpu->arch.sie_block->icptcode = 0;
509 local_irq_disable();
510 kvm_guest_enter();
511 local_irq_enable();
512 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
513 atomic_read(&vcpu->arch.sie_block->cpuflags));
1f0d0f09
CO
514 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
515 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
516 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
517 }
b0c632db
HC
518 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
519 vcpu->arch.sie_block->icptcode);
520 local_irq_disable();
521 kvm_guest_exit();
522 local_irq_enable();
523
524 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
525}
526
527int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
528{
8f2abe6a 529 int rc;
b0c632db
HC
530 sigset_t sigsaved;
531
9ace903d 532rerun_vcpu:
b0c632db
HC
533 if (vcpu->sigset_active)
534 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
535
9e6dabef 536 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
b0c632db 537
ba5c1e9b
CO
538 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
539
8f2abe6a
CB
540 switch (kvm_run->exit_reason) {
541 case KVM_EXIT_S390_SIEIC:
8f2abe6a 542 case KVM_EXIT_UNKNOWN:
9ace903d 543 case KVM_EXIT_INTR:
8f2abe6a
CB
544 case KVM_EXIT_S390_RESET:
545 break;
546 default:
547 BUG();
548 }
549
d7b0b5eb
CO
550 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
551 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
552
dab4079d 553 might_fault();
8f2abe6a
CB
554
555 do {
556 __vcpu_run(vcpu);
8f2abe6a
CB
557 rc = kvm_handle_sie_intercept(vcpu);
558 } while (!signal_pending(current) && !rc);
559
9ace903d
CE
560 if (rc == SIE_INTERCEPT_RERUNVCPU)
561 goto rerun_vcpu;
562
b1d16c49
CE
563 if (signal_pending(current) && !rc) {
564 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 565 rc = -EINTR;
b1d16c49 566 }
8f2abe6a 567
b8e660b8 568 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
569 /* intercept cannot be handled in-kernel, prepare kvm-run */
570 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
571 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
572 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
573 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
574 rc = 0;
575 }
576
577 if (rc == -EREMOTE) {
578 /* intercept was handled, but userspace support is needed
579 * kvm_run has been prepared by the handler */
580 rc = 0;
581 }
b0c632db 582
d7b0b5eb
CO
583 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
584 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
585
b0c632db
HC
586 if (vcpu->sigset_active)
587 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
588
b0c632db 589 vcpu->stat.exit_userspace++;
7e8e6ab4 590 return rc;
b0c632db
HC
591}
592
092670cd 593static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
594 unsigned long n, int prefix)
595{
596 if (prefix)
597 return copy_to_guest(vcpu, guestdest, from, n);
598 else
599 return copy_to_guest_absolute(vcpu, guestdest, from, n);
600}
601
602/*
603 * store status at address
604 * we use have two special cases:
605 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
606 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
607 */
971eb77f 608int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
b0c632db 609{
092670cd 610 unsigned char archmode = 1;
b0c632db
HC
611 int prefix;
612
613 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
614 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
615 return -EFAULT;
616 addr = SAVE_AREA_BASE;
617 prefix = 0;
618 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
619 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
620 return -EFAULT;
621 addr = SAVE_AREA_BASE;
622 prefix = 1;
623 } else
624 prefix = 0;
625
f64ca217 626 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
b0c632db
HC
627 vcpu->arch.guest_fpregs.fprs, 128, prefix))
628 return -EFAULT;
629
f64ca217 630 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
b0c632db
HC
631 vcpu->arch.guest_gprs, 128, prefix))
632 return -EFAULT;
633
f64ca217 634 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
b0c632db
HC
635 &vcpu->arch.sie_block->gpsw, 16, prefix))
636 return -EFAULT;
637
f64ca217 638 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
b0c632db
HC
639 &vcpu->arch.sie_block->prefix, 4, prefix))
640 return -EFAULT;
641
642 if (__guestcopy(vcpu,
f64ca217 643 addr + offsetof(struct save_area, fp_ctrl_reg),
b0c632db
HC
644 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
645 return -EFAULT;
646
f64ca217 647 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
b0c632db
HC
648 &vcpu->arch.sie_block->todpr, 4, prefix))
649 return -EFAULT;
650
f64ca217 651 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
b0c632db
HC
652 &vcpu->arch.sie_block->cputm, 8, prefix))
653 return -EFAULT;
654
f64ca217 655 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
b0c632db
HC
656 &vcpu->arch.sie_block->ckc, 8, prefix))
657 return -EFAULT;
658
f64ca217 659 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
b0c632db
HC
660 &vcpu->arch.guest_acrs, 64, prefix))
661 return -EFAULT;
662
663 if (__guestcopy(vcpu,
f64ca217 664 addr + offsetof(struct save_area, ctrl_regs),
b0c632db
HC
665 &vcpu->arch.sie_block->gcr, 128, prefix))
666 return -EFAULT;
667 return 0;
668}
669
b0c632db
HC
670long kvm_arch_vcpu_ioctl(struct file *filp,
671 unsigned int ioctl, unsigned long arg)
672{
673 struct kvm_vcpu *vcpu = filp->private_data;
674 void __user *argp = (void __user *)arg;
bc923cc9 675 long r;
b0c632db 676
93736624
AK
677 switch (ioctl) {
678 case KVM_S390_INTERRUPT: {
ba5c1e9b
CO
679 struct kvm_s390_interrupt s390int;
680
93736624 681 r = -EFAULT;
ba5c1e9b 682 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624
AK
683 break;
684 r = kvm_s390_inject_vcpu(vcpu, &s390int);
685 break;
ba5c1e9b 686 }
b0c632db 687 case KVM_S390_STORE_STATUS:
bc923cc9
AK
688 r = kvm_s390_vcpu_store_status(vcpu, arg);
689 break;
b0c632db
HC
690 case KVM_S390_SET_INITIAL_PSW: {
691 psw_t psw;
692
bc923cc9 693 r = -EFAULT;
b0c632db 694 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
695 break;
696 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
697 break;
b0c632db
HC
698 }
699 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
700 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
701 break;
27e0393f
CO
702#ifdef CONFIG_KVM_S390_UCONTROL
703 case KVM_S390_UCAS_MAP: {
704 struct kvm_s390_ucas_mapping ucasmap;
705
706 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
707 r = -EFAULT;
708 break;
709 }
710
711 if (!kvm_is_ucontrol(vcpu->kvm)) {
712 r = -EINVAL;
713 break;
714 }
715
716 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
717 ucasmap.vcpu_addr, ucasmap.length);
718 break;
719 }
720 case KVM_S390_UCAS_UNMAP: {
721 struct kvm_s390_ucas_mapping ucasmap;
722
723 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
724 r = -EFAULT;
725 break;
726 }
727
728 if (!kvm_is_ucontrol(vcpu->kvm)) {
729 r = -EINVAL;
730 break;
731 }
732
733 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
734 ucasmap.length);
735 break;
736 }
737#endif
b0c632db 738 default:
bc923cc9 739 r = -EINVAL;
b0c632db 740 }
bc923cc9 741 return r;
b0c632db
HC
742}
743
744/* Section: memory related */
f7784b8e
MT
745int kvm_arch_prepare_memory_region(struct kvm *kvm,
746 struct kvm_memory_slot *memslot,
747 struct kvm_memory_slot old,
748 struct kvm_userspace_memory_region *mem,
749 int user_alloc)
b0c632db
HC
750{
751 /* A few sanity checks. We can have exactly one memory slot which has
752 to start at guest virtual zero and which has to be located at a
753 page boundary in userland and which has to end at a page boundary.
754 The memory in userland is ok to be fragmented into various different
755 vmas. It is okay to mmap() and munmap() stuff in this slot after
756 doing this call at any time */
757
628eb9b8 758 if (mem->slot)
b0c632db
HC
759 return -EINVAL;
760
761 if (mem->guest_phys_addr)
762 return -EINVAL;
763
598841ca 764 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
765 return -EINVAL;
766
598841ca 767 if (mem->memory_size & 0xffffful)
b0c632db
HC
768 return -EINVAL;
769
2668dab7
CO
770 if (!user_alloc)
771 return -EINVAL;
772
f7784b8e
MT
773 return 0;
774}
775
776void kvm_arch_commit_memory_region(struct kvm *kvm,
777 struct kvm_userspace_memory_region *mem,
778 struct kvm_memory_slot old,
779 int user_alloc)
780{
f7850c92 781 int rc;
f7784b8e 782
598841ca
CO
783
784 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
785 mem->guest_phys_addr, mem->memory_size);
786 if (rc)
f7850c92 787 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 788 return;
b0c632db
HC
789}
790
34d4cb8f
MT
791void kvm_arch_flush_shadow(struct kvm *kvm)
792{
793}
794
b0c632db
HC
795static int __init kvm_s390_init(void)
796{
ef50f7ac 797 int ret;
0ee75bea 798 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
ef50f7ac
CB
799 if (ret)
800 return ret;
801
802 /*
803 * guests can ask for up to 255+1 double words, we need a full page
25985edc 804 * to hold the maximum amount of facilities. On the other hand, we
ef50f7ac
CB
805 * only set facilities that are known to work in KVM.
806 */
c2f0e8c8 807 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
ef50f7ac
CB
808 if (!facilities) {
809 kvm_exit();
810 return -ENOMEM;
811 }
14375bc4 812 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
6d00d00b 813 facilities[0] &= 0xff00fff3f47c0000ULL;
9950f8be 814 facilities[1] &= 0x201c000000000000ULL;
ef50f7ac 815 return 0;
b0c632db
HC
816}
817
818static void __exit kvm_s390_exit(void)
819{
ef50f7ac 820 free_page((unsigned long) facilities);
b0c632db
HC
821 kvm_exit();
822}
823
824module_init(kvm_s390_init);
825module_exit(kvm_s390_exit);
This page took 0.365511 seconds and 5 git commands to generate.