/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};
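/*
 * Note: the common KVM code walks this table and exposes one counter per
 * entry, typically as a file under debugfs (e.g.
 * /sys/kernel/debug/kvm/exit_null), summed over all vcpus.
 */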
/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};
int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}
static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc;
}
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);
	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}
int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
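/*
 * Note: KVM_S390_ENABLE_SIE is the only device ioctl handled here;
 * s390_enable_sie() reworks the calling process' page tables into a
 * SIE-capable (pgste-backed) format, which is why userspace is expected
 * to issue it early, before guest memory is set up.
 */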
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}
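/*
 * For KVM_CHECK_EXTENSION, a return value of 0 means "not supported";
 * positive values either mean "supported" (1) or carry a limit, as for
 * KVM_CAP_S390_MEM_OP and the vcpu-count capabilities above.
 */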
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = 0;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
					   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}
static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
					 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}
static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
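/*
 * The guest TOD clock is exposed as two attributes: TOD_HIGH (the epoch
 * extension, currently required to be 0) and TOD_LOW (the 64-bit TOD
 * base). Userspace is expected to save and restore both, e.g. for
 * migration.
 */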
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}
static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}
static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}
static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
*kvm
, struct kvm_device_attr
*attr
)
1047 switch (attr
->group
) {
1048 case KVM_S390_VM_MEM_CTRL
:
1049 switch (attr
->attr
) {
1050 case KVM_S390_VM_MEM_ENABLE_CMMA
:
1051 case KVM_S390_VM_MEM_CLR_CMMA
:
1052 ret
= sclp
.has_cmma
? 0 : -ENXIO
;
1054 case KVM_S390_VM_MEM_LIMIT_SIZE
:
1062 case KVM_S390_VM_TOD
:
1063 switch (attr
->attr
) {
1064 case KVM_S390_VM_TOD_LOW
:
1065 case KVM_S390_VM_TOD_HIGH
:
1073 case KVM_S390_VM_CPU_MODEL
:
1074 switch (attr
->attr
) {
1075 case KVM_S390_VM_CPU_PROCESSOR
:
1076 case KVM_S390_VM_CPU_MACHINE
:
1077 case KVM_S390_VM_CPU_PROCESSOR_FEAT
:
1078 case KVM_S390_VM_CPU_MACHINE_FEAT
:
1079 case KVM_S390_VM_CPU_MACHINE_SUBFUNC
:
1082 /* configuring subfunctions is not supported yet */
1083 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC
:
1089 case KVM_S390_VM_CRYPTO
:
1090 switch (attr
->attr
) {
1091 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW
:
1092 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW
:
1093 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW
:
1094 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW
:
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}
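/*
 * A storage key is one byte per 4K page: access-control bits, fetch
 * protection, and the reference/change bits. The lowest-order bit (0x01)
 * is reserved, which is why set_skeys above rejects keys with it set.
 */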
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
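/*
 * Sketch of the expected userspace calling convention for the device-attr
 * ioctls above (illustrative only; error handling omitted):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
 *		.addr  = (__u64) &limit,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */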
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}
static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}
static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}
/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}
/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}
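/*
 * Note on the BSCA -> ESCA switch above: all vcpus are blocked and kicked
 * out of SIE while the SCA pointer is rewritten, so no vcpu can run with
 * a stale scaoh/scaol pair; the read locks in sca_add_vcpu/sca_del_vcpu
 * pair with the write lock taken here.
 */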
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
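/*
 * The helpers below account guest CPU time: while accounting is enabled,
 * cputm_start holds the TOD value at the last start, and the elapsed
 * delta is folded into the SIE block's CPU timer on stop. The seqcount
 * lets readers on other CPUs obtain a consistent value.
 */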
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}
static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}
/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}
/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}
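/*
 * "seq & ~1" turns an odd (write in progress) snapshot into a value that
 * can never match the final sequence count, forcing a retry whenever the
 * read raced with an update; the WARN_ON above flags the only case that
 * could spin forever (reading on the writer's own CPU mid-update).
 */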
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= 0x02;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= 0x04;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= 0x08;
	vcpu->arch.sie_block->eca = 0x1002000U;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= 0x80000000U;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= 0x40000000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}
/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}
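/*
 * A guest's prefix area covers two consecutive 4K pages (hence the
 * "prefix + 2*PAGE_SIZE - 1" check above); both must stay mapped and
 * notifier-protected, so any hit triggers a KVM_REQ_MMU_RELOAD that
 * re-arms the protection in kvm_s390_handle_requests().
 */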
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure the new values will be lazily loaded */
	save_fpu_regs();
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	current->thread.fpu.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx(current->thread.fpu.vxrs,
				 (freg_t *)fpu->fprs);
	else
		memcpy(current->thread.fpu.fprs, &fpu->fprs,
		       sizeof(fpu->fprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)fpu->fprs,
				 current->thread.fpu.vxrs);
	else
		memcpy(fpu->fprs, current->thread.fpu.fprs,
		       sizeof(fpu->fprs));
	fpu->fpc = current->thread.fpu.fpc;
	return 0;
}
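/*
 * Note on the lazy FPU handling above: while these ioctls run, the guest
 * floating point/vector state lives in current->thread.fpu, so both
 * functions operate on the host thread copy and the new values are picked
 * up lazily on the next vcpu load. On machines with the vector facility
 * the guest FPRs are embedded in the vector registers, hence the
 * convert_fp_to_vx()/convert_vx_to_fp() translation.
 */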
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu)) {
		rc = -EBUSY;
	} else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;
	if (!sclp.has_gpere)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
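/*
 * Illustrative userspace usage (not part of this file): single-stepping a
 * vcpu via the interface above, assuming a hypothetical vcpu fd:
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 *
 * Any flag outside VALID_GUESTDBG_FLAGS is rejected with -EINVAL.
 */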
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
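/*
 * Once userspace has used the MP state interface, user_cpu_state_ctrl stays
 * set and kvm_arch_vcpu_ioctl_run() no longer auto-starts stopped vcpus;
 * starting and stopping is then entirely under userspace control.
 */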
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!vcpu->requests)
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
				  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_andnot(CPUSTAT_IBS,
				      &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
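/*
 * Note that each handled request above jumps back to the retry label, so a
 * request that arrives while another one is being processed is picked up
 * before the vcpu re-enters SIE.
 */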
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = tod - get_tod_clock();
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
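/*
 * The TOD epoch is stored as a delta against the host TOD clock; blocking
 * all vcpus while the per-vcpu epoch fields are updated ensures no vcpu
 * runs in SIE with a mixed old/new epoch view.
 */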
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
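/*
 * Summary of the early exits above: an async page fault is only set up if
 * userspace installed a valid pfault token (see the KVM_SYNC_PFAULT
 * handling in sync_regs()), the guest PSW mask matches the pfault
 * select/compare values, external interrupts (including the subclass
 * enabled in CR0) are open, no other interrupt is pending and pfault
 * handling is enabled on the gmap.
 */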
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
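/*
 * The guest CPU timer is accounted separately while inside SIE, hence
 * __disable_cpu_timer_accounting() before sie64a() and the matching enable
 * afterwards; both toggles run with local interrupts disabled so the
 * accounting state cannot be observed half-switched from interrupt context.
 */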
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
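/*
 * sync_regs() and store_regs() are exact mirrors: everything copied into
 * the SIE block before __vcpu_run() is copied back into kvm_run afterwards,
 * so userspace always sees a consistent register state via the synced-regs
 * part of the kvm_run structure.
 */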
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else {
		gpa -= __LC_FPREGS_SAVE_AREA;
	}

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
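/*
 * With KVM_S390_STORE_STATUS_NOADDR the save areas land at their architected
 * absolute lowcore offsets (e.g. the FP registers at __LC_FPREGS_SAVE_AREA);
 * an explicit address is first biased by -__LC_FPREGS_SAVE_AREA so that the
 * same "gpa + __LC_*" writes place the FP save area exactly at that address.
 */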
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}

int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRs due to the lazy
	 * copying in vcpu load/put. We can simply call save_fpu_regs()
	 * to save the current register state because we are in the
	 * middle of a load/put cycle.
	 *
	 * Let's update our copies before we save it into the save area.
	 */
	save_fpu_regs();

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}
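/*
 * IBS is only beneficial (and only kept enabled) while exactly one vcpu is
 * running: the first started vcpu gets it enabled, starting a second one
 * disables it everywhere, and stopping the next-to-last vcpu re-enables it
 * on the single remaining runner.
 */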
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
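/*
 * Illustrative userspace usage (not part of this file): reading 256 bytes
 * of guest logical memory through the handler above, assuming a
 * hypothetical vcpu fd, guest address and buffer:
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = 256,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)buffer,
 *		.ar    = 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 *
 * With KVM_S390_MEMOP_F_CHECK_ONLY set only the address range is verified
 * and no data is copied.
 */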
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
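/*
 * In user-controlled (ucontrol) mode this fault handler lets userspace
 * mmap() the hardware SIE control block of a vcpu at page offset
 * KVM_S390_SIE_PAGE_OFFSET of the vcpu file descriptor.
 */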
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
}
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
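/*
 * Worked example for nonhyp_mask(): (sclp.hmfai << i * 2) >> 30 extracts
 * the i-th two-bit field of hmfai (counting from the most significant
 * bits), giving a value 0-3. The 48-bit constant is then shifted right by
 * 0, 16, 32 or 48 bits, i.e. each two-bit field selects how many 16-bit
 * blocks of facility bits remain usable for guests.
 */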
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}
static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_list_mask[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}
static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");