/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
a0616cde 30#include <asm/switch_to.h>
1526bf9c 31#include <asm/sclp.h>
8f2abe6a 32#include "kvm-s390.h"
b0c632db
HC
33#include "gaccess.h"
34
5786fffa
CH
35#define CREATE_TRACE_POINTS
36#include "trace.h"
ade38c31 37#include "trace-s390.h"
5786fffa 38
b0c632db
HC
/* Expand "name" to the offset of counter stat.x inside struct kvm_vcpu. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-vcpu statistics exported via debugfs: each entry maps a file name to
 * a counter in the vcpu's stat structure.  The table is NULL-terminated.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
85
/* Facility list presented to guests; allocated and masked in kvm_s390_init(). */
static unsigned long long *facilities;
b0c632db
HC
87
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

/* Nothing to undo: per-cpu enable above is a no-op on s390. */
void kvm_arch_hardware_disable(void *garbage)
{
}
98
b0c632db
HC
/* No architecture-specific hardware setup is required on s390. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

/* All s390 CPUs that run this kernel are SIE-compatible; nothing to check. */
void kvm_arch_check_processor_compat(void *rtn)
{
}

/* Module-wide init/exit hooks called by common KVM code; no work needed. */
int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
120
121/* Section: device related */
122long kvm_arch_dev_ioctl(struct file *filp,
123 unsigned int ioctl, unsigned long arg)
124{
125 if (ioctl == KVM_S390_ENABLE_SIE)
126 return s390_enable_sie();
127 return -EINVAL;
128}
129
130int kvm_dev_ioctl_check_extension(long ext)
131{
d7b0b5eb
CO
132 int r;
133
2bd0ac4e 134 switch (ext) {
d7b0b5eb 135 case KVM_CAP_S390_PSW:
b6cf8788 136 case KVM_CAP_S390_GMAP:
52e16b18 137 case KVM_CAP_SYNC_MMU:
1efd0f59
CO
138#ifdef CONFIG_KVM_S390_UCONTROL
139 case KVM_CAP_S390_UCONTROL:
140#endif
60b413c9 141 case KVM_CAP_SYNC_REGS:
14eebd91 142 case KVM_CAP_ONE_REG:
d7b0b5eb
CO
143 r = 1;
144 break;
e726b1bd
CB
145 case KVM_CAP_NR_VCPUS:
146 case KVM_CAP_MAX_VCPUS:
147 r = KVM_MAX_VCPUS;
148 break;
1526bf9c 149 case KVM_CAP_S390_COW:
abf09bed 150 r = MACHINE_HAS_ESOP;
1526bf9c 151 break;
2bd0ac4e 152 default:
d7b0b5eb 153 r = 0;
2bd0ac4e 154 }
d7b0b5eb 155 return r;
b0c632db
HC
156}
157
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 *
 * Not implemented on s390 at this point: the ioctl succeeds but reports
 * no dirty pages.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
167
/*
 * VM-level ioctl handler.  Currently only KVM_S390_INTERRUPT (inject a
 * floating interrupt into the VM) is supported; anything else yields
 * -ENOTTY so common code can report "unknown ioctl".
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
191
e08b9637 192int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 193{
b0c632db
HC
194 int rc;
195 char debug_name[16];
196
e08b9637
CO
197 rc = -EINVAL;
198#ifdef CONFIG_KVM_S390_UCONTROL
199 if (type & ~KVM_VM_S390_UCONTROL)
200 goto out_err;
201 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
202 goto out_err;
203#else
204 if (type)
205 goto out_err;
206#endif
207
b0c632db
HC
208 rc = s390_enable_sie();
209 if (rc)
d89f5eff 210 goto out_err;
b0c632db 211
b290411a
CO
212 rc = -ENOMEM;
213
b0c632db
HC
214 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
215 if (!kvm->arch.sca)
d89f5eff 216 goto out_err;
b0c632db
HC
217
218 sprintf(debug_name, "kvm-%u", current->pid);
219
220 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
221 if (!kvm->arch.dbf)
222 goto out_nodbf;
223
ba5c1e9b
CO
224 spin_lock_init(&kvm->arch.float_int.lock);
225 INIT_LIST_HEAD(&kvm->arch.float_int.list);
226
b0c632db
HC
227 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
228 VM_EVENT(kvm, 3, "%s", "vm created");
229
e08b9637
CO
230 if (type & KVM_VM_S390_UCONTROL) {
231 kvm->arch.gmap = NULL;
232 } else {
233 kvm->arch.gmap = gmap_alloc(current->mm);
234 if (!kvm->arch.gmap)
235 goto out_nogmap;
236 }
d89f5eff 237 return 0;
598841ca
CO
238out_nogmap:
239 debug_unregister(kvm->arch.dbf);
b0c632db
HC
240out_nodbf:
241 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
242out_err:
243 return rc;
b0c632db
HC
244}
245
d329c035
CB
/*
 * Tear down a vcpu: detach it from the SCA (non-ucontrol VMs only), free a
 * ucontrol vcpu's private gmap, release the SIE control block page and the
 * vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* remove this cpu from the SCA cpu mask and sda slot */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	/* make the SCA update visible before freeing the sie block */
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
266
/*
 * Destroy every vcpu of the VM, then clear the vcpu array and the online
 * counter under kvm->lock so concurrent lookups see a consistent state.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
282
ad8ba2cd
SY
/* Called by common code before VM destruction; nothing to flush on s390. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
286
b0c632db
HC
/*
 * Release everything kvm_arch_init_vm() acquired: all vcpus, the SCA page,
 * the debug feature area, and (for non-ucontrol VMs) the guest gmap.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
295
/* Section: vcpu related */
/*
 * Architecture part of vcpu initialization.  A ucontrol vcpu gets its own
 * private gmap; a normal vcpu shares the VM-wide gmap and advertises which
 * register sets the sync-regs interface keeps valid in kvm_run.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		/* ucontrol vcpus do not use the sync-regs interface here */
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}
313
/* Counterpart of kvm_arch_vcpu_init(); no per-vcpu state to undo here. */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
318
/*
 * Called when the vcpu is scheduled onto a host cpu: save the host FP and
 * access registers, install the guest ones, activate the guest address
 * space and mark the vcpu running in the SIE control block.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* strip invalid bits before loading the guest FPC */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
329
/*
 * Called when the vcpu is scheduled off a host cpu: mirror image of
 * kvm_arch_vcpu_load() — clear RUNNING, detach the guest address space,
 * stash the guest FP/access registers and restore the host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
339
/*
 * Reset the vcpu to its architected initial state.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for CR0 and CR14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	/* a freshly reset cpu is in the stopped state */
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}
357
42897d86
MT
/* Hook called by common code after vcpu creation; no extra work on s390. */
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}
362
b0c632db
HC
/*
 * Initialize the SIE control block and per-vcpu timers: z/Arch mode,
 * stopped state, execution controls, the host facility list, and the
 * clock-comparator wakeup machinery.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	/* point the guest at the (masked) facility list page */
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* version 0xff marks the cpu id as that of a virtual cpu */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
379
380struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
381 unsigned int id)
382{
4d47555a
CO
383 struct kvm_vcpu *vcpu;
384 int rc = -EINVAL;
385
386 if (id >= KVM_MAX_VCPUS)
387 goto out;
388
389 rc = -ENOMEM;
b0c632db 390
4d47555a 391 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
b0c632db 392 if (!vcpu)
4d47555a 393 goto out;
b0c632db 394
180c12fb
CB
395 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
396 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
397
398 if (!vcpu->arch.sie_block)
399 goto out_free_cpu;
400
401 vcpu->arch.sie_block->icpua = id;
58f9460b
CO
402 if (!kvm_is_ucontrol(kvm)) {
403 if (!kvm->arch.sca) {
404 WARN_ON_ONCE(1);
405 goto out_free_cpu;
406 }
407 if (!kvm->arch.sca->cpu[id].sda)
408 kvm->arch.sca->cpu[id].sda =
409 (__u64) vcpu->arch.sie_block;
410 vcpu->arch.sie_block->scaoh =
411 (__u32)(((__u64)kvm->arch.sca) >> 32);
412 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
413 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
414 }
b0c632db 415
ba5c1e9b
CO
416 spin_lock_init(&vcpu->arch.local_int.lock);
417 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
418 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 419 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
420 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
421 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 422 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 423 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 424
b0c632db
HC
425 rc = kvm_vcpu_init(vcpu, kvm, id);
426 if (rc)
7b06bf2f 427 goto out_free_sie_block;
b0c632db
HC
428 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
429 vcpu->arch.sie_block);
ade38c31 430 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 431
b0c632db 432 return vcpu;
7b06bf2f
WY
433out_free_sie_block:
434 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db
HC
435out_free_cpu:
436 kfree(vcpu);
4d47555a 437out:
b0c632db
HC
438 return ERR_PTR(rc);
439}
440
b0c632db
HC
/* Required by common KVM code but never invoked on s390 — trap if it is. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

/* Same as above: referenced by common code, never called on s390. */
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
454
14eebd91
CO
/*
 * KVM_GET_ONE_REG: copy the requested s390 register (TOD programmable
 * register, epoch difference, cpu timer or clock comparator) to userspace.
 * Unknown register ids yield -EINVAL.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
483
/*
 * KVM_SET_ONE_REG: counterpart of the getter above — load the requested
 * register from userspace into the SIE control block.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
b6d33834 512
b0c632db
HC
/* KVM_S390_INITIAL_RESET: perform an architected initial cpu reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
518
/* Copy the 16 general purpose registers from userspace into the sync area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

/* Copy the 16 general purpose registers out of the sync area. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
530
/*
 * Load access and control registers; the access registers are restored
 * immediately so the running host context matches the new values.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

/* Read back the access and control registers. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
547
/*
 * Load the guest floating point registers and FPC (masked to valid bits),
 * and restore them into the hardware right away.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

/* Read back the guest floating point registers and FPC. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
562
563static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
564{
565 int rc = 0;
566
9e6dabef 567 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 568 rc = -EBUSY;
d7b0b5eb
CO
569 else {
570 vcpu->run->psw_mask = psw.mask;
571 vcpu->run->psw_addr = psw.addr;
572 }
b0c632db
HC
573 return rc;
574}
575
/* Address translation ioctl — unsupported on s390 so far. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

/* Guest debug support — unsupported on s390 so far. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}
587
62d9f0db
MT
/* MP-state query — unsupported on s390 so far. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

/* MP-state update — unsupported on s390 so far. */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
599
/*
 * Run the guest through one SIE entry/exit cycle.
 *
 * Before entry: sync gprs 14/15 into the SIE block, give the scheduler and
 * pending machine checks a chance, and deliver pending interrupts (except
 * for ucontrol VMs, where userspace handles them).  After exit: translate a
 * SIE fault into either SIE_INTERCEPT_UCONTROL (ucontrol) or an injected
 * addressing exception, then sync gprs 14/15 back out.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	preempt_disable();
	kvm_guest_enter();
	preempt_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	trace_kvm_s390_sie_enter(vcpu,
				 atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		/* sie64a itself faulted (not a normal intercept) */
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
	kvm_guest_exit();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
642
/*
 * KVM_RUN: the main vcpu execution loop.
 *
 * Imports dirty sync-regs state from kvm_run, then repeatedly enters SIE
 * via __vcpu_run() and handles intercepts in-kernel until a signal arrives
 * or an intercept needs userspace.  On the way out, exit_reason and the
 * sync-regs fields in kvm_run are filled in for userspace.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only these exit reasons may re-enter KVM_RUN */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	/* import dirty register state announced by userspace */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			/* ucontrol: every intercept goes to userspace */
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* export current register state back to userspace */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
735
092670cd 736static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
737 unsigned long n, int prefix)
738{
739 if (prefix)
740 return copy_to_guest(vcpu, guestdest, from, n);
741 else
742 return copy_to_guest_absolute(vcpu, guestdest, from, n);
743}
744
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * Writes the vcpu's architected save area (FP regs, GP regs, PSW, prefix,
 * FPC, TOD programmable reg, cpu timer, clock comparator, access regs and
 * control regs) to guest memory.  Returns 0 or -EFAULT.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		/* flag z/Arch mode at absolute address 163 */
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
812
b0c632db
HC
/*
 * Per-vcpu ioctl dispatcher: interrupt injection, store status, initial
 * PSW/reset, ONE_REG access, ucontrol address space mapping and fault
 * resolution.  Unknown ioctls yield -ENOTTY.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* only valid for user-controlled VMs */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* resolve a guest page fault at address "arg" */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
904
5b1c1493
CO
/*
 * mmap fault handler for the vcpu fd: for ucontrol VMs, expose the SIE
 * control block page at KVM_S390_SIE_PAGE_OFFSET; everything else SIGBUSes.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
917
db3fe4eb
TY
/* s390 keeps no arch-specific memslot metadata, so nothing to free. */
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

/* Nothing arch-specific to allocate per memslot either. */
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}
927
b0c632db 928/* Section: memory related */
f7784b8e
MT
929int kvm_arch_prepare_memory_region(struct kvm *kvm,
930 struct kvm_memory_slot *memslot,
931 struct kvm_memory_slot old,
932 struct kvm_userspace_memory_region *mem,
933 int user_alloc)
b0c632db
HC
934{
935 /* A few sanity checks. We can have exactly one memory slot which has
936 to start at guest virtual zero and which has to be located at a
937 page boundary in userland and which has to end at a page boundary.
938 The memory in userland is ok to be fragmented into various different
939 vmas. It is okay to mmap() and munmap() stuff in this slot after
940 doing this call at any time */
941
628eb9b8 942 if (mem->slot)
b0c632db
HC
943 return -EINVAL;
944
945 if (mem->guest_phys_addr)
946 return -EINVAL;
947
598841ca 948 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
949 return -EINVAL;
950
598841ca 951 if (mem->memory_size & 0xffffful)
b0c632db
HC
952 return -EINVAL;
953
2668dab7
CO
954 if (!user_alloc)
955 return -EINVAL;
956
f7784b8e
MT
957 return 0;
958}
959
/*
 * Commit a validated memory region by mapping it into the VM's gmap.
 * Failures are only logged: this hook cannot return an error to the caller.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;


	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
974
2df72e9b
MT
/* No shadow page tables on s390; these flush hooks have nothing to do. */
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
983
b0c632db
HC
/*
 * Module init: register with common KVM, then build the facility list
 * page handed to guests, masked down to facilities known to work in KVM.
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	/* copy the first 16 bytes of the host STFLE facility list ... */
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	/* ... and strip facilities KVM does not support */
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x001c000000000000ULL;
	return 0;
}
1006
/* Module exit: release the facility page and unregister from common KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}
1012
1013module_init(kvm_s390_init);
1014module_exit(kvm_s390_exit);
This page took 0.391102 seconds and 5 git commands to generate.