/*
 * s390host.c --  hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
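
/*
 * VCPU_STAT(x) expands to the byte offset of stat.x inside struct kvm_vcpu
 * plus the KVM_STAT_VCPU type tag, so each entry below binds a debugfs
 * file name to one counter in the per-vcpu statistics block.
 */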
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};
static unsigned long long *facilities;
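
/*
 * facilities points to a zeroed page holding the STFLE facility bits
 * presented to guests; it is allocated and masked in kvm_s390_init()
 * below and handed to each vcpu's SIE block in kvm_arch_vcpu_setup().
 */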
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}
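
/*
 * Userspace discovers these capabilities through the KVM_CHECK_EXTENSION
 * ioctl on /dev/kvm. A minimal sketch of the calling side (kvm_fd is a
 * hypothetical descriptor for /dev/kvm, not part of this file):
 *
 *	int has_gmap = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_GMAP);
 */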
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
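
/*
 * Example (sketch, not part of this file): userspace injects a floating
 * interrupt by filling a struct kvm_s390_interrupt and issuing
 * KVM_S390_INTERRUPT on the VM file descriptor; token is hypothetical:
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.parm = 0,
 *		.parm64 = token,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 */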
int kvm_arch_init_vm(struct kvm *kvm)
{
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	kvm->arch.gmap = gmap_alloc(current->mm);
	if (!kvm->arch.gmap)
		goto out_nogmap;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
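
/*
 * The SCA (system control area) page allocated above holds one SIE
 * descriptor slot (sda) per vcpu plus the mcn bitmap; vcpu create and
 * destroy below set and clear the matching sda entry and mcn bit.
 */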
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	clear_bit(63 - vcpu->vcpu_id,
		  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
	    (__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	gmap_free(kvm->arch.gmap);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
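
/*
 * load/put bracket guest execution: the host floating point and access
 * registers are saved and the guest copies installed on load, and the
 * reverse happens on put. The guest address space (gmap) is enabled only
 * while the vcpu is loaded on a host cpu.
 */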
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix    = 0UL;
	vcpu->arch.sie_block->ihcpu     = 0xffff;
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
	vcpu->arch.sie_block->ecb   = 6;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
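
/*
 * The clock-comparator hrtimer armed here fires kvm_s390_idle_wakeup,
 * which schedules the tasklet initialized above so the wakeup of a vcpu
 * sitting in enabled wait is delivered from softirq context.
 */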
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
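
/*
 * sie64a() enters SIE with the vcpu's control block and guest gprs; a
 * nonzero return means the host itself faulted on the SIE instruction,
 * which is reflected to the guest as an addressing exception above.
 */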
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
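
/*
 * Sketch of the matching userspace loop (illustrative only; vcpu_fd and
 * run are hypothetical names for the vcpu file descriptor and its
 * mmap'ed struct kvm_run):
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run->s390_sieic.icptcode,
 *				     run->s390_sieic.ipa,
 *				     run->s390_sieic.ipb);
 *	}
 */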
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}
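
/*
 * prefix selects the addressing mode for the store: when set, the
 * destination goes through copy_to_guest() and is subject to the vcpu's
 * prefix area; otherwise it is treated as a guest absolute address.
 */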
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
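
/*
 * Example (sketch): the single allowed slot is registered from userspace
 * with guest_phys_addr 0 and 1 MB aligned address and size (the low 20
 * bits must be clear); backing and mem_size are hypothetical:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.userspace_addr = (__u64) backing,
 *		.memory_size = mem_size,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */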
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);