/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/nmi.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"
34 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
36 struct kvm_stats_debugfs_item debugfs_entries
[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace
) },
38 { "exit_null", VCPU_STAT(exit_null
) },
39 { "exit_validity", VCPU_STAT(exit_validity
) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request
) },
41 { "exit_external_request", VCPU_STAT(exit_external_request
) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt
) },
43 { "exit_instruction", VCPU_STAT(exit_instruction
) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption
) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program
) },
46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg
) },
47 { "instruction_lctl", VCPU_STAT(instruction_lctl
) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal
) },
49 { "deliver_external_call", VCPU_STAT(deliver_external_call
) },
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal
) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt
) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal
) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal
) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal
) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int
) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state
) },
57 { "instruction_stidp", VCPU_STAT(instruction_stidp
) },
58 { "instruction_spx", VCPU_STAT(instruction_spx
) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx
) },
60 { "instruction_stap", VCPU_STAT(instruction_stap
) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key
) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch
) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc
) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi
) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl
) },
66 { "instruction_tprot", VCPU_STAT(instruction_tprot
) },
67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense
) },
68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running
) },
69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call
) },
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency
) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop
) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch
) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix
) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart
) },
75 { "diagnose_10", VCPU_STAT(diagnose_10
) },
76 { "diagnose_44", VCPU_STAT(diagnose_44
) },
/* DMA page holding the facility list presented to guests (set in kvm_s390_init). */
static unsigned long long *facilities;
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
/* Nothing to do: SIE needs no per-cpu teardown on s390. */
void kvm_arch_hardware_disable(void *garbage)
{
}
93 int kvm_arch_hardware_setup(void)
98 void kvm_arch_hardware_unsetup(void)
/* All s390 CPUs that can run this kernel can run SIE; nothing to check. */
void kvm_arch_check_processor_compat(void *rtn)
{
}
/* Arch-specific module init hook; s390 has no extra work here. */
int kvm_arch_init(void *opaque)
{
	return 0;
}
111 void kvm_arch_exit(void)
/* Section: device related */
116 long kvm_arch_dev_ioctl(struct file
*filp
,
117 unsigned int ioctl
, unsigned long arg
)
119 if (ioctl
== KVM_S390_ENABLE_SIE
)
120 return s390_enable_sie();
124 int kvm_dev_ioctl_check_extension(long ext
)
129 case KVM_CAP_S390_PSW
:
130 case KVM_CAP_S390_GMAP
:
/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
/* Dirty logging is not implemented on s390; report success with no bits set. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
149 long kvm_arch_vm_ioctl(struct file
*filp
,
150 unsigned int ioctl
, unsigned long arg
)
152 struct kvm
*kvm
= filp
->private_data
;
153 void __user
*argp
= (void __user
*)arg
;
157 case KVM_S390_INTERRUPT
: {
158 struct kvm_s390_interrupt s390int
;
161 if (copy_from_user(&s390int
, argp
, sizeof(s390int
)))
163 r
= kvm_s390_inject_vm(kvm
, &s390int
);
173 int kvm_arch_init_vm(struct kvm
*kvm
)
178 rc
= s390_enable_sie();
184 kvm
->arch
.sca
= (struct sca_block
*) get_zeroed_page(GFP_KERNEL
);
188 sprintf(debug_name
, "kvm-%u", current
->pid
);
190 kvm
->arch
.dbf
= debug_register(debug_name
, 8, 2, 8 * sizeof(long));
194 spin_lock_init(&kvm
->arch
.float_int
.lock
);
195 INIT_LIST_HEAD(&kvm
->arch
.float_int
.list
);
197 debug_register_view(kvm
->arch
.dbf
, &debug_sprintf_view
);
198 VM_EVENT(kvm
, 3, "%s", "vm created");
200 kvm
->arch
.gmap
= gmap_alloc(current
->mm
);
206 debug_unregister(kvm
->arch
.dbf
);
208 free_page((unsigned long)(kvm
->arch
.sca
));
213 void kvm_arch_vcpu_destroy(struct kvm_vcpu
*vcpu
)
215 VCPU_EVENT(vcpu
, 3, "%s", "free cpu");
216 clear_bit(63 - vcpu
->vcpu_id
, (unsigned long *) &vcpu
->kvm
->arch
.sca
->mcn
);
217 if (vcpu
->kvm
->arch
.sca
->cpu
[vcpu
->vcpu_id
].sda
==
218 (__u64
) vcpu
->arch
.sie_block
)
219 vcpu
->kvm
->arch
.sca
->cpu
[vcpu
->vcpu_id
].sda
= 0;
221 free_page((unsigned long)(vcpu
->arch
.sie_block
));
222 kvm_vcpu_uninit(vcpu
);
226 static void kvm_free_vcpus(struct kvm
*kvm
)
229 struct kvm_vcpu
*vcpu
;
231 kvm_for_each_vcpu(i
, vcpu
, kvm
)
232 kvm_arch_vcpu_destroy(vcpu
);
234 mutex_lock(&kvm
->lock
);
235 for (i
= 0; i
< atomic_read(&kvm
->online_vcpus
); i
++)
236 kvm
->vcpus
[i
] = NULL
;
238 atomic_set(&kvm
->online_vcpus
, 0);
239 mutex_unlock(&kvm
->lock
);
/* No async arch events to flush on s390; intentionally empty. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
246 void kvm_arch_destroy_vm(struct kvm
*kvm
)
249 free_page((unsigned long)(kvm
->arch
.sca
));
250 debug_unregister(kvm
->arch
.dbf
);
251 gmap_free(kvm
->arch
.gmap
);
/* Section: vcpu related */
255 int kvm_arch_vcpu_init(struct kvm_vcpu
*vcpu
)
257 vcpu
->arch
.gmap
= vcpu
->kvm
->arch
.gmap
;
/* Nothing vcpu-local to undo; teardown happens in kvm_arch_vcpu_destroy(). */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
266 void kvm_arch_vcpu_load(struct kvm_vcpu
*vcpu
, int cpu
)
268 save_fp_regs(&vcpu
->arch
.host_fpregs
);
269 save_access_regs(vcpu
->arch
.host_acrs
);
270 vcpu
->arch
.guest_fpregs
.fpc
&= FPC_VALID_MASK
;
271 restore_fp_regs(&vcpu
->arch
.guest_fpregs
);
272 restore_access_regs(vcpu
->arch
.guest_acrs
);
273 gmap_enable(vcpu
->arch
.gmap
);
274 atomic_set_mask(CPUSTAT_RUNNING
, &vcpu
->arch
.sie_block
->cpuflags
);
277 void kvm_arch_vcpu_put(struct kvm_vcpu
*vcpu
)
279 atomic_clear_mask(CPUSTAT_RUNNING
, &vcpu
->arch
.sie_block
->cpuflags
);
280 gmap_disable(vcpu
->arch
.gmap
);
281 save_fp_regs(&vcpu
->arch
.guest_fpregs
);
282 save_access_regs(vcpu
->arch
.guest_acrs
);
283 restore_fp_regs(&vcpu
->arch
.host_fpregs
);
284 restore_access_regs(vcpu
->arch
.host_acrs
);
287 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu
*vcpu
)
289 /* this equals initial cpu reset in pop, but we don't switch to ESA */
290 vcpu
->arch
.sie_block
->gpsw
.mask
= 0UL;
291 vcpu
->arch
.sie_block
->gpsw
.addr
= 0UL;
292 vcpu
->arch
.sie_block
->prefix
= 0UL;
293 vcpu
->arch
.sie_block
->ihcpu
= 0xffff;
294 vcpu
->arch
.sie_block
->cputm
= 0UL;
295 vcpu
->arch
.sie_block
->ckc
= 0UL;
296 vcpu
->arch
.sie_block
->todpr
= 0;
297 memset(vcpu
->arch
.sie_block
->gcr
, 0, 16 * sizeof(__u64
));
298 vcpu
->arch
.sie_block
->gcr
[0] = 0xE0UL
;
299 vcpu
->arch
.sie_block
->gcr
[14] = 0xC2000000UL
;
300 vcpu
->arch
.guest_fpregs
.fpc
= 0;
301 asm volatile("lfpc %0" : : "Q" (vcpu
->arch
.guest_fpregs
.fpc
));
302 vcpu
->arch
.sie_block
->gbea
= 1;
305 int kvm_arch_vcpu_setup(struct kvm_vcpu
*vcpu
)
307 atomic_set(&vcpu
->arch
.sie_block
->cpuflags
, CPUSTAT_ZARCH
|
310 vcpu
->arch
.sie_block
->ecb
= 6;
311 vcpu
->arch
.sie_block
->eca
= 0xC1002001U
;
312 vcpu
->arch
.sie_block
->fac
= (int) (long) facilities
;
313 hrtimer_init(&vcpu
->arch
.ckc_timer
, CLOCK_REALTIME
, HRTIMER_MODE_ABS
);
314 tasklet_init(&vcpu
->arch
.tasklet
, kvm_s390_tasklet
,
315 (unsigned long) vcpu
);
316 vcpu
->arch
.ckc_timer
.function
= kvm_s390_idle_wakeup
;
317 get_cpu_id(&vcpu
->arch
.cpu_id
);
318 vcpu
->arch
.cpu_id
.version
= 0xff;
322 struct kvm_vcpu
*kvm_arch_vcpu_create(struct kvm
*kvm
,
325 struct kvm_vcpu
*vcpu
;
328 if (id
>= KVM_MAX_VCPUS
)
333 vcpu
= kzalloc(sizeof(struct kvm_vcpu
), GFP_KERNEL
);
337 vcpu
->arch
.sie_block
= (struct kvm_s390_sie_block
*)
338 get_zeroed_page(GFP_KERNEL
);
340 if (!vcpu
->arch
.sie_block
)
343 vcpu
->arch
.sie_block
->icpua
= id
;
344 BUG_ON(!kvm
->arch
.sca
);
345 if (!kvm
->arch
.sca
->cpu
[id
].sda
)
346 kvm
->arch
.sca
->cpu
[id
].sda
= (__u64
) vcpu
->arch
.sie_block
;
347 vcpu
->arch
.sie_block
->scaoh
= (__u32
)(((__u64
)kvm
->arch
.sca
) >> 32);
348 vcpu
->arch
.sie_block
->scaol
= (__u32
)(__u64
)kvm
->arch
.sca
;
349 set_bit(63 - id
, (unsigned long *) &kvm
->arch
.sca
->mcn
);
351 spin_lock_init(&vcpu
->arch
.local_int
.lock
);
352 INIT_LIST_HEAD(&vcpu
->arch
.local_int
.list
);
353 vcpu
->arch
.local_int
.float_int
= &kvm
->arch
.float_int
;
354 spin_lock(&kvm
->arch
.float_int
.lock
);
355 kvm
->arch
.float_int
.local_int
[id
] = &vcpu
->arch
.local_int
;
356 init_waitqueue_head(&vcpu
->arch
.local_int
.wq
);
357 vcpu
->arch
.local_int
.cpuflags
= &vcpu
->arch
.sie_block
->cpuflags
;
358 spin_unlock(&kvm
->arch
.float_int
.lock
);
360 rc
= kvm_vcpu_init(vcpu
, kvm
, id
);
362 goto out_free_sie_block
;
363 VM_EVENT(kvm
, 3, "create cpu %d at %p, sie block at %p", id
, vcpu
,
364 vcpu
->arch
.sie_block
);
368 free_page((unsigned long)(vcpu
->arch
.sie_block
));
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
/* KVM_S390_INITIAL_RESET ioctl backend. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
388 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu
*vcpu
, struct kvm_regs
*regs
)
390 memcpy(&vcpu
->arch
.guest_gprs
, ®s
->gprs
, sizeof(regs
->gprs
));
394 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu
*vcpu
, struct kvm_regs
*regs
)
396 memcpy(®s
->gprs
, &vcpu
->arch
.guest_gprs
, sizeof(regs
->gprs
));
400 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu
*vcpu
,
401 struct kvm_sregs
*sregs
)
403 memcpy(&vcpu
->arch
.guest_acrs
, &sregs
->acrs
, sizeof(sregs
->acrs
));
404 memcpy(&vcpu
->arch
.sie_block
->gcr
, &sregs
->crs
, sizeof(sregs
->crs
));
405 restore_access_regs(vcpu
->arch
.guest_acrs
);
409 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu
*vcpu
,
410 struct kvm_sregs
*sregs
)
412 memcpy(&sregs
->acrs
, &vcpu
->arch
.guest_acrs
, sizeof(sregs
->acrs
));
413 memcpy(&sregs
->crs
, &vcpu
->arch
.sie_block
->gcr
, sizeof(sregs
->crs
));
417 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu
*vcpu
, struct kvm_fpu
*fpu
)
419 memcpy(&vcpu
->arch
.guest_fpregs
.fprs
, &fpu
->fprs
, sizeof(fpu
->fprs
));
420 vcpu
->arch
.guest_fpregs
.fpc
= fpu
->fpc
;
421 restore_fp_regs(&vcpu
->arch
.guest_fpregs
);
425 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu
*vcpu
, struct kvm_fpu
*fpu
)
427 memcpy(&fpu
->fprs
, &vcpu
->arch
.guest_fpregs
.fprs
, sizeof(fpu
->fprs
));
428 fpu
->fpc
= vcpu
->arch
.guest_fpregs
.fpc
;
432 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu
*vcpu
, psw_t psw
)
436 if (!(atomic_read(&vcpu
->arch
.sie_block
->cpuflags
) & CPUSTAT_STOPPED
))
439 vcpu
->run
->psw_mask
= psw
.mask
;
440 vcpu
->run
->psw_addr
= psw
.addr
;
445 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu
*vcpu
,
446 struct kvm_translation
*tr
)
448 return -EINVAL
; /* not implemented yet */
451 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu
*vcpu
,
452 struct kvm_guest_debug
*dbg
)
454 return -EINVAL
; /* not implemented yet */
457 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu
*vcpu
,
458 struct kvm_mp_state
*mp_state
)
460 return -EINVAL
; /* not implemented yet */
463 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu
*vcpu
,
464 struct kvm_mp_state
*mp_state
)
466 return -EINVAL
; /* not implemented yet */
469 static void __vcpu_run(struct kvm_vcpu
*vcpu
)
471 memcpy(&vcpu
->arch
.sie_block
->gg14
, &vcpu
->arch
.guest_gprs
[14], 16);
476 if (test_thread_flag(TIF_MCCK_PENDING
))
479 kvm_s390_deliver_pending_interrupts(vcpu
);
481 vcpu
->arch
.sie_block
->icptcode
= 0;
485 VCPU_EVENT(vcpu
, 6, "entering sie flags %x",
486 atomic_read(&vcpu
->arch
.sie_block
->cpuflags
));
487 if (sie64a(vcpu
->arch
.sie_block
, vcpu
->arch
.guest_gprs
)) {
488 VCPU_EVENT(vcpu
, 3, "%s", "fault in sie instruction");
489 kvm_s390_inject_program_int(vcpu
, PGM_ADDRESSING
);
491 VCPU_EVENT(vcpu
, 6, "exit sie icptcode %d",
492 vcpu
->arch
.sie_block
->icptcode
);
497 memcpy(&vcpu
->arch
.guest_gprs
[14], &vcpu
->arch
.sie_block
->gg14
, 16);
500 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu
*vcpu
, struct kvm_run
*kvm_run
)
506 if (vcpu
->sigset_active
)
507 sigprocmask(SIG_SETMASK
, &vcpu
->sigset
, &sigsaved
);
509 atomic_clear_mask(CPUSTAT_STOPPED
, &vcpu
->arch
.sie_block
->cpuflags
);
511 BUG_ON(vcpu
->kvm
->arch
.float_int
.local_int
[vcpu
->vcpu_id
] == NULL
);
513 switch (kvm_run
->exit_reason
) {
514 case KVM_EXIT_S390_SIEIC
:
515 case KVM_EXIT_UNKNOWN
:
517 case KVM_EXIT_S390_RESET
:
523 vcpu
->arch
.sie_block
->gpsw
.mask
= kvm_run
->psw_mask
;
524 vcpu
->arch
.sie_block
->gpsw
.addr
= kvm_run
->psw_addr
;
530 rc
= kvm_handle_sie_intercept(vcpu
);
531 } while (!signal_pending(current
) && !rc
);
533 if (rc
== SIE_INTERCEPT_RERUNVCPU
)
536 if (signal_pending(current
) && !rc
) {
537 kvm_run
->exit_reason
= KVM_EXIT_INTR
;
541 if (rc
== -EOPNOTSUPP
) {
542 /* intercept cannot be handled in-kernel, prepare kvm-run */
543 kvm_run
->exit_reason
= KVM_EXIT_S390_SIEIC
;
544 kvm_run
->s390_sieic
.icptcode
= vcpu
->arch
.sie_block
->icptcode
;
545 kvm_run
->s390_sieic
.ipa
= vcpu
->arch
.sie_block
->ipa
;
546 kvm_run
->s390_sieic
.ipb
= vcpu
->arch
.sie_block
->ipb
;
550 if (rc
== -EREMOTE
) {
551 /* intercept was handled, but userspace support is needed
552 * kvm_run has been prepared by the handler */
556 kvm_run
->psw_mask
= vcpu
->arch
.sie_block
->gpsw
.mask
;
557 kvm_run
->psw_addr
= vcpu
->arch
.sie_block
->gpsw
.addr
;
559 if (vcpu
->sigset_active
)
560 sigprocmask(SIG_SETMASK
, &sigsaved
, NULL
);
562 vcpu
->stat
.exit_userspace
++;
566 static int __guestcopy(struct kvm_vcpu
*vcpu
, u64 guestdest
, void *from
,
567 unsigned long n
, int prefix
)
570 return copy_to_guest(vcpu
, guestdest
, from
, n
);
572 return copy_to_guest_absolute(vcpu
, guestdest
, from
, n
);
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
581 int kvm_s390_vcpu_store_status(struct kvm_vcpu
*vcpu
, unsigned long addr
)
583 unsigned char archmode
= 1;
586 if (addr
== KVM_S390_STORE_STATUS_NOADDR
) {
587 if (copy_to_guest_absolute(vcpu
, 163ul, &archmode
, 1))
589 addr
= SAVE_AREA_BASE
;
591 } else if (addr
== KVM_S390_STORE_STATUS_PREFIXED
) {
592 if (copy_to_guest(vcpu
, 163ul, &archmode
, 1))
594 addr
= SAVE_AREA_BASE
;
599 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area
, fp_regs
),
600 vcpu
->arch
.guest_fpregs
.fprs
, 128, prefix
))
603 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area
, gp_regs
),
604 vcpu
->arch
.guest_gprs
, 128, prefix
))
607 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area
, psw
),
608 &vcpu
->arch
.sie_block
->gpsw
, 16, prefix
))
611 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area
, pref_reg
),
612 &vcpu
->arch
.sie_block
->prefix
, 4, prefix
))
615 if (__guestcopy(vcpu
,
616 addr
+ offsetof(struct save_area
, fp_ctrl_reg
),
617 &vcpu
->arch
.guest_fpregs
.fpc
, 4, prefix
))
620 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area
, tod_reg
),
621 &vcpu
->arch
.sie_block
->todpr
, 4, prefix
))
624 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area
, timer
),
625 &vcpu
->arch
.sie_block
->cputm
, 8, prefix
))
628 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area
, clk_cmp
),
629 &vcpu
->arch
.sie_block
->ckc
, 8, prefix
))
632 if (__guestcopy(vcpu
, addr
+ offsetof(struct save_area
, acc_regs
),
633 &vcpu
->arch
.guest_acrs
, 64, prefix
))
636 if (__guestcopy(vcpu
,
637 addr
+ offsetof(struct save_area
, ctrl_regs
),
638 &vcpu
->arch
.sie_block
->gcr
, 128, prefix
))
643 long kvm_arch_vcpu_ioctl(struct file
*filp
,
644 unsigned int ioctl
, unsigned long arg
)
646 struct kvm_vcpu
*vcpu
= filp
->private_data
;
647 void __user
*argp
= (void __user
*)arg
;
651 case KVM_S390_INTERRUPT
: {
652 struct kvm_s390_interrupt s390int
;
655 if (copy_from_user(&s390int
, argp
, sizeof(s390int
)))
657 r
= kvm_s390_inject_vcpu(vcpu
, &s390int
);
660 case KVM_S390_STORE_STATUS
:
661 r
= kvm_s390_vcpu_store_status(vcpu
, arg
);
663 case KVM_S390_SET_INITIAL_PSW
: {
667 if (copy_from_user(&psw
, argp
, sizeof(psw
)))
669 r
= kvm_arch_vcpu_ioctl_set_initial_psw(vcpu
, psw
);
672 case KVM_S390_INITIAL_RESET
:
673 r
= kvm_arch_vcpu_ioctl_initial_reset(vcpu
);
/* Section: memory related */
682 int kvm_arch_prepare_memory_region(struct kvm
*kvm
,
683 struct kvm_memory_slot
*memslot
,
684 struct kvm_memory_slot old
,
685 struct kvm_userspace_memory_region
*mem
,
688 /* A few sanity checks. We can have exactly one memory slot which has
689 to start at guest virtual zero and which has to be located at a
690 page boundary in userland and which has to end at a page boundary.
691 The memory in userland is ok to be fragmented into various different
692 vmas. It is okay to mmap() and munmap() stuff in this slot after
693 doing this call at any time */
698 if (mem
->guest_phys_addr
)
701 if (mem
->userspace_addr
& 0xffffful
)
704 if (mem
->memory_size
& 0xffffful
)
713 void kvm_arch_commit_memory_region(struct kvm
*kvm
,
714 struct kvm_userspace_memory_region
*mem
,
715 struct kvm_memory_slot old
,
721 rc
= gmap_map_segment(kvm
->arch
.gmap
, mem
->userspace_addr
,
722 mem
->guest_phys_addr
, mem
->memory_size
);
724 printk(KERN_WARNING
"kvm-s390: failed to commit memory region\n");
/* No shadow page tables to flush on s390; intentionally empty. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
732 static int __init
kvm_s390_init(void)
735 ret
= kvm_init(NULL
, sizeof(struct kvm_vcpu
), 0, THIS_MODULE
);
740 * guests can ask for up to 255+1 double words, we need a full page
741 * to hold the maximum amount of facilities. On the other hand, we
742 * only set facilities that are known to work in KVM.
744 facilities
= (unsigned long long *) get_zeroed_page(GFP_KERNEL
|GFP_DMA
);
749 memcpy(facilities
, S390_lowcore
.stfle_fac_list
, 16);
750 facilities
[0] &= 0xff00fff3f47c0000ULL
;
751 facilities
[1] &= 0x201c000000000000ULL
;
755 static void __exit
kvm_s390_exit(void)
757 free_page((unsigned long) facilities
);
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
);