/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
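
/*
 * Per-vcpu counters exported via debugfs. VCPU_STAT() expands to the
 * offset of the counter within struct kvm_vcpu plus the KVM_STAT_VCPU
 * type tag, which is all the generic KVM debugfs code needs to read
 * the value. "userspace_handled", for instance, maps to
 * vcpu->stat.exit_userspace, incremented in kvm_arch_vcpu_ioctl_run()
 * below.
 */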
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
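/*
 * KVM_S390_ENABLE_SIE is the only device ioctl handled here;
 * s390_enable_sie() prepares the calling process' address space so the
 * hardware SIE instruction can run guests in it, which is why it must
 * happen before any VM state is set up (kvm_arch_create_vm() below
 * calls it as well).
 */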
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	switch (ext) {
	default:
		return 0;
	}
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -EINVAL;
	}

	return r;
}
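
/*
 * VM setup: enable SIE for this process, allocate the system control
 * area (SCA) through which the hardware locates each vcpu's SIE control
 * block, register a debug feature area for the VM_EVENT/VCPU_EVENT
 * traces, and initialise the floating interrupt list shared by all
 * vcpus.
 */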
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
	    (__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_destroy(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}
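
/*
 * Context switch helpers: on load, save the host floating point and
 * access registers and install the guest's copies; on put, save the
 * guest's and restore the host's. The general purpose registers are
 * handled separately around the sie64a() call in __vcpu_run().
 */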
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset as defined in the Principles of
	 * Operation, but we don't switch to ESA mode */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

/* The current code can have up to 256 pages for virtio */
#define VIRTIODESCSPACE (256ul * 4096ul)
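
/*
 * Set up the per-vcpu SIE control block. gmsor/gmslm define the guest
 * memory window (start address and last byte) in the host address
 * space; the extra VIRTIODESCSPACE beyond guest_memsize keeps the
 * virtio descriptor pages, presumably located right after guest
 * memory, inside that window.
 */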
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
				      vcpu->kvm->arch.guest_origin +
				      VIRTIODESCSPACE - 1ul;
	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
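
/*
 * vcpu creation: the SIE control block gets a zeroed page of its own,
 * its address is entered into this cpu's SCA slot (sda), and the
 * vcpu's local interrupt state is linked to the VM-wide floating
 * interrupt structure under the float_int lock.
 */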
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	else
		BUG_ON(!kvm->vcpus[id]); /* vcpu already exists */
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_cpu;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_cpu:
	/* free_page() is a no-op when the sie_block allocation itself failed */
	free_page((unsigned long)(vcpu->arch.sie_block));
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
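
/*
 * One pass through SIE: handle pending machine checks and interrupts,
 * then enter the guest via sie64a(). The 16-byte memcpys shadow guest
 * registers 14 and 15 between guest_gprs and the gg14 area of the
 * control block. A fault in the SIE instruction itself is reflected
 * to the guest as an addressing exception.
 */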
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
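
/*
 * Return code convention for the run loop below:
 * kvm_handle_sie_intercept() returns 0 to re-enter the guest,
 * -ENOTSUPP when the intercept must be completed in userspace
 * (kvm_run is filled in here), and -EREMOTE when the handler has
 * already prepared kvm_run; both of the latter are turned into a
 * successful return from the ioctl.
 */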
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->kvm->arch.guest_memsize) {
		vcpu_put(vcpu);
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (signal_pending(current) && !rc)
		rc = -EINTR;

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/*
		 * intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler
		 */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}
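
/*
 * Copy helper for the store-status code below: with prefix set, the
 * destination goes through the guest's prefix translation
 * (copy_to_guest); otherwise it is treated as a guest absolute
 * address (copy_to_guest_absolute).
 */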
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}

/* Section: memory related */
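/*
 * Registering memory changes the guest window of every vcpu, so all
 * vcpu mutexes are taken (via trylock, backing out on contention)
 * before guest_origin/guest_memsize and the per-vcpu gmsor/gmslm
 * values are updated, and dropped afterwards.
 */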
int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int i;

	/* A few sanity checks. There can be exactly one memory slot; it must
	 * start at guest physical address zero, begin on a page boundary in
	 * userland, and its size must be a multiple of the page size. The
	 * userland memory may be fragmented across several vmas, and it is
	 * fine to mmap() and munmap() ranges within the slot at any time
	 * after this call. */

	if (mem->slot || kvm->arch.guest_memsize)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	/* lock all vcpus */
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (!kvm->vcpus[i])
			continue;
		if (!mutex_trylock(&kvm->vcpus[i]->mutex))
			goto fail_out;
	}

	kvm->arch.guest_origin = mem->userspace_addr;
	kvm->arch.guest_memsize = mem->memory_size;

	/* update sie control blocks, and unlock all vcpus */
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm->vcpus[i]->arch.sie_block->gmsor =
				kvm->arch.guest_origin;
			kvm->vcpus[i]->arch.sie_block->gmslm =
				kvm->arch.guest_memsize +
				kvm->arch.guest_origin +
				VIRTIODESCSPACE - 1ul;
			mutex_unlock(&kvm->vcpus[i]->mutex);
		}
	}

	return 0;

fail_out:
	/* unlock only the vcpus locked above: skip the slot whose
	 * trylock failed as well as empty slots */
	while (--i >= 0)
		if (kvm->vcpus[i])
			mutex_unlock(&kvm->vcpus[i]->mutex);
	return -EINVAL;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}
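
/*
 * Module init: register with the common KVM code, then build the
 * facility list presented to guests. stfle can return up to 256
 * doublewords, hence a full page; GFP_DMA keeps the page low enough
 * that its address fits the 32-bit fac field set in
 * kvm_arch_vcpu_setup(). The mask applied to facilities[0] hides
 * facilities that KVM does not handle.
 */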
static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, so we need a full
	 * page to hold the maximum amount of facilities. On the other hand,
	 * we only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	stfle(facilities, 1);
	facilities[0] &= 0xff00fff3f0700000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);