/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

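/*
 * Exported vcpu statistics: the common KVM code publishes each named
 * counter below via debugfs (typically under /sys/kernel/debug/kvm/).
 */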
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

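/*
 * Descriptive note on the ioctl above: s390_enable_sie() reworks the
 * page tables of the calling process so they carry the per-page guest
 * state (PGSTEs) that SIE needs; see its definition under arch/s390/mm.
 */
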
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

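/*
 * Note: dirty page logging is not implemented on s390 at this stage, so
 * the handler above reports success without touching the log.
 */
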
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

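/*
 * Illustrative userspace sketch for the VM ioctl above (not part of
 * this file): a floating interrupt is injected by filling in a
 * struct kvm_s390_interrupt and issuing KVM_S390_INTERRUPT on the VM
 * fd. The type shown is one example; parm/parm64 are type-specific,
 * and "cookie" is a hypothetical payload.
 *
 *	struct kvm_s390_interrupt irq = {
 *		.type   = KVM_S390_INT_VIRTIO,
 *		.parm   = 0,
 *		.parm64 = cookie,
 *	};
 *	if (ioctl(vm_fd, KVM_S390_INTERRUPT, &irq) < 0)
 *		perror("KVM_S390_INTERRUPT");
 */
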
int kvm_arch_init_vm(struct kvm *kvm)
{
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;	/* without this, the error paths below would return 0 */

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	kvm->arch.gmap = gmap_alloc(current->mm);
	if (!kvm->arch.gmap)
		goto out_nogmap;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

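/*
 * Note on the SCA (system control area): it is a single page shared by
 * all vcpus of a VM. kvm_arch_vcpu_destroy() below clears this vcpu's
 * bit in the mcn mask and resets its sda entry so the hardware no
 * longer treats the CPU as present.
 */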
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

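/*
 * The load/put pair below swaps the host and guest floating point and
 * access registers and switches the guest address space (gmap) in and
 * out whenever a vcpu is scheduled onto or off a host cpu.
 */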
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

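/*
 * The non-zero values above (gcr[0], gcr[14], gbea = 1) correspond to
 * the architected initial-reset contents of control registers 0 and 14
 * and of the breaking-event-address register; see the Principles of
 * Operation chapter on CPU resets.
 */
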
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

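/*
 * Best-effort notes on the setup above: fac points the SIE block at the
 * facility list that was masked in kvm_s390_init(); the hrtimer backs
 * the guest's clock comparator while the vcpu waits (kvm_s390_idle_wakeup
 * schedules the tasklet); and a CPU-id version code of 0xff is what
 * STIDP is expected to report when running virtualized.
 */
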
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

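/*
 * __vcpu_run() below is the body of the inner run loop: guest r14/r15
 * are staged into the SIE block (gg14/gg15), pending machine checks and
 * interrupts are handled, then sie64a() enters interpreted execution.
 * A non-zero return from sie64a() means the host itself faulted on the
 * SIE instruction, which is reflected to the guest as an addressing
 * exception.
 */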
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}

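/*
 * Illustrative userspace sketch (not part of this file): before the
 * first KVM_RUN, a vcpu is typically given its start PSW via
 * KVM_S390_SET_INITIAL_PSW. The mask below is one plausible choice
 * (64-bit addressing mode) and start_address is a hypothetical entry
 * point; consult the uapi headers for the exact structure layout.
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000UL,
 *		.addr = start_address,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */
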
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest physical zero and which has to be located at a
	   1 MB segment boundary in userland and which has to end at a segment
	   boundary as well (the checks below test 1 MB alignment, matching
	   the gmap segment granularity). The memory in userland may be
	   fragmented into various different vmas. It is okay to mmap() and
	   munmap() stuff in this slot after doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

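/*
 * gmap_map_segment() above populates the guest address space (gmap)
 * that SIE translates through: guest real addresses in the range
 * [guest_phys_addr, guest_phys_addr + memory_size) are backed by the
 * userspace mapping starting at userspace_addr. (Descriptive note; the
 * gmap implementation lives under arch/s390/mm.)
 */
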
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

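/*
 * The masks applied to facilities[0] and facilities[1] above whitelist
 * only the facility bits that this version of KVM is known to handle
 * correctly for guests; the exact values are maintenance decisions and
 * grow as more facilities gain virtualization support.
 */
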
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);