KVM: SVM: constify lookup tables
[deliverable/linux.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db 1/*
a53c8fab 2 * hosting zSeries kernel virtual machines
b0c632db 3 *
a53c8fab 4 * Copyright IBM Corp. 2008, 2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
a0616cde 30#include <asm/switch_to.h>
1526bf9c 31#include <asm/sclp.h>
8f2abe6a 32#include "kvm-s390.h"
b0c632db
HC
33#include "gaccess.h"
34
5786fffa
CH
35#define CREATE_TRACE_POINTS
36#include "trace.h"
ade38c31 37#include "trace-s390.h"
5786fffa 38
b0c632db
HC
/* Expand to the offset/kind pair that the common KVM debugfs code expects
 * for a per-vcpu statistics counter. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* Per-vcpu exit, interrupt-delivery, intercepted-instruction and diagnose
 * counters exported via debugfs.  The table is NULL-terminated. */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
85
/* Facility bitmap exposed to guests; allocated and masked in kvm_s390_init(),
 * freed in kvm_s390_exit().  Installed per vcpu via sie_block->fac. */
static unsigned long long *facilities;
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* SIE is available on every s390 host, so per-CPU enabling is a no-op. */
	return 0;
}
94
/* Counterpart of kvm_arch_hardware_enable(); nothing to undo on s390. */
void kvm_arch_hardware_disable(void *garbage)
{
}
98
int kvm_arch_hardware_setup(void)
{
	/* No architecture-wide hardware initialisation is required. */
	return 0;
}
103
/* Counterpart of kvm_arch_hardware_setup(); nothing to tear down. */
void kvm_arch_hardware_unsetup(void)
{
}
107
/* All s390 CPUs in a system are compatible; no check needed. */
void kvm_arch_check_processor_compat(void *rtn)
{
}
111
int kvm_arch_init(void *opaque)
{
	/* Nothing beyond common KVM initialisation is needed at module load. */
	return 0;
}
116
/* Counterpart of kvm_arch_init(); nothing to release. */
void kvm_arch_exit(void)
{
}
120
/* Section: device related */
/*
 * Handle ioctls on /dev/kvm itself.  Only KVM_S390_ENABLE_SIE is supported;
 * it switches the calling process's mm into a SIE-capable mode via
 * s390_enable_sie().  Everything else is rejected with -EINVAL.
 */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
129
/*
 * Report which optional KVM capabilities this architecture supports.
 * Returns 1/0 for boolean capabilities, or a value (e.g. the vcpu limit).
 */
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_S390_COW:
		/* copy-on-write support depends on facility bit 85 from SCLP */
		r = sclp_get_fac85() & 0x2;
		break;
	default:
		r = 0;
	}
	return r;
}
157
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Not implemented on s390 yet; reports success without logging anything.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
167
/*
 * VM-scope ioctls.  Only KVM_S390_INTERRUPT (inject a floating interrupt
 * into the VM) is handled here; unknown ioctls return -ENOTTY so the
 * common code can report "not supported".
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
191
/*
 * Architecture setup for a new VM: validate the requested VM type, enable
 * SIE for the host mm, allocate the system control area (SCA) and a debug
 * feature buffer, initialise the floating-interrupt list and, for
 * non-ucontrol guests, allocate the guest address-space (gmap).
 * On failure, resources are unwound in reverse order via the goto chain.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* only the ucontrol type bit is valid, and it requires CAP_SYS_ADMIN */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	/* "kvm-" + pid fits the 16-byte buffer (pid is at most 7 digits) */
	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol guests manage their address space per vcpu */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
245
/*
 * Tear down one vcpu: detach it from the SCA (non-ucontrol guests only),
 * free its per-vcpu gmap (ucontrol guests only), release the SIE control
 * block page and the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* remove this cpu from the SCA cpu mask and sda slot */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
266
/* Destroy every vcpu of a VM and clear the vcpu table under kvm->lock. */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
282
/* No asynchronous per-arch events need flushing before VM destruction. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
286
/*
 * Release all architecture resources of a VM: vcpus, the SCA page, the
 * debug feature buffer, and (for non-ucontrol guests) the shared gmap.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
295
/* Section: vcpu related */
/*
 * Per-vcpu init: ucontrol guests get a private gmap per vcpu; regular
 * guests share the VM-wide gmap and advertise which register sets are
 * synchronised through kvm_run (prefix, GPRs, access and control regs).
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}
313
/* Counterpart of kvm_arch_vcpu_init(); cleanup happens in vcpu_destroy. */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
318
/*
 * Called when a vcpu is scheduled onto a host CPU: save host FP/access
 * registers, load the guest's, enable its gmap and mark it running.
 * Order matters: host state must be saved before guest state is restored.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* mask out unsupported FPC bits before loading guest FP state */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
329
/*
 * Called when a vcpu is scheduled off a host CPU: reverse of
 * kvm_arch_vcpu_load() — save guest FP/access registers, disable the
 * gmap and restore the host's registers.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
339
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for CR0 and CR14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	/* a freshly reset cpu is in the stopped state */
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}
357
/*
 * One-time setup of the SIE control block: initial CPU state flags,
 * execution controls, facility list pointer, the clock-comparator
 * wakeup timer/tasklet and the reported CPU id.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* tag the reported CPU id so guests can recognise a KVM host */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
374
/*
 * Allocate and wire up a new vcpu: the vcpu structure, its SIE control
 * block page, its SCA slot (non-ucontrol only) and the local interrupt
 * state, then run common kvm_vcpu_init().  Errors unwind via gotos.
 * Returns the vcpu or an ERR_PTR.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	/* the SIE control block must be a full zeroed page */
	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
		get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		/* register this cpu in the system control area */
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
435
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
442
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
449
/*
 * KVM_GET_ONE_REG: copy a single SIE-block register (TOD programmable
 * register, epoch difference, CPU timer or clock comparator) to user
 * space.  Unknown register ids return -EINVAL.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
478
/*
 * KVM_SET_ONE_REG: mirror of kvm_arch_vcpu_ioctl_get_one_reg() — read a
 * single register value from user space into the SIE block.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
b6d33834 507
/* KVM_S390_INITIAL_RESET: perform an architected initial CPU reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
513
/* Copy general purpose registers from user space into the sync area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
519
/* Copy general purpose registers from the sync area out to user space. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
525
/* Set access and control registers; access regs are loaded immediately. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
534
/* Read back access and control registers. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
542
/* Set guest floating point registers; invalid FPC bits are masked off. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}
550
/* Read back the guest floating point registers and FPC. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
557
/*
 * KVM_S390_SET_INITIAL_PSW: install an initial PSW in the sync area.
 * Only permitted while the vcpu is in the stopped state.
 */
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
570
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
576
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}
582
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
588
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
594
/*
 * One pass through the SIE entry loop: deliver pending work (reschedule,
 * machine checks, interrupts), enter SIE, and translate a SIE fault into
 * either a ucontrol intercept or an injected addressing exception.
 * gg14/gg15 are kept in sync with gprs[14..15] across the SIE run.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	/* SIE uses gg14/gg15 as guest r14/r15; copy them in (16 bytes) */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol guests get their interrupts injected by user space */
	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	trace_kvm_s390_sie_enter(vcpu,
				 atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			/* let user space handle the fault */
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	/* copy gg14/gg15 back out to the sync area */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
639
/*
 * KVM_RUN: main vcpu execution loop.  Syncs dirty registers in from
 * kvm_run, repeatedly enters SIE via __vcpu_run() and handles intercepts
 * in kernel until a signal arrives or user-space help is needed, then
 * syncs state back out and translates the stop reason into an exit code.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only re-enterable exit reasons are acceptable on entry */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			/* ucontrol: every intercept goes to user space */
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
732
092670cd 733static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
734 unsigned long n, int prefix)
735{
736 if (prefix)
737 return copy_to_guest(vcpu, guestdest, from, n);
738 else
739 return copy_to_guest_absolute(vcpu, guestdest, from, n);
740}
741
/*
 * store status at address
 * we use have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * Writes the architected save area (FP regs, GP regs, PSW, prefix, FPC,
 * TOD programmable reg, CPU timer, clock comparator, access and control
 * regs) to guest memory.  Returns 0 or -EFAULT.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		/* flag z/Arch mode at absolute location 163 */
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
809
/*
 * vcpu-scope ioctls: interrupt injection, store status, initial PSW and
 * reset, ONE_REG access, ucontrol address-space mapping and fault
 * resolution.  Unknown ioctls return -ENOTTY.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* only valid for ucontrol guests */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
901
/*
 * mmap fault handler for the vcpu fd: ucontrol guests may map the SIE
 * control block page at KVM_S390_SIE_PAGE_OFFSET; everything else faults.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
914
/* No per-slot arch metadata is allocated, so there is nothing to free. */
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}
919
/* No per-slot arch metadata needed on s390. */
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}
924
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	/* user address and size must be 1 MB (segment) aligned */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
956
/*
 * Commit a validated memory region: map the user memory into the guest
 * address space.  A mapping failure is only logged because this hook
 * cannot return an error.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;


	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
971
/* s390 keeps no shadow page tables, so there is nothing to flush. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
975
/*
 * Module init: register with common KVM, then build the facility list
 * page that is advertised to guests (host STFLE bits masked down to the
 * facilities KVM is known to handle).
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}
998
/* Module exit: free the facility page and unregister from common KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}
1004
/* Register module entry/exit points. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
This page took 0.345746 seconds and 5 git commands to generate.