/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00
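
/*
 * Note on the type encoding (derived from the masks above): floating I/O
 * interrupts carry css/ssid/schid, or the adapter-interrupt bit, in the
 * interrupt type itself; all non-I/O sources use type values with
 * 0xfffe0000 set, which is exactly what is_ioint() tests for.
 */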
static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}
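
/*
 * Deliverability of an external interrupt depends on both the PSW
 * external mask and the matching subclass mask bit in CR0, as checked
 * case by case below: 0x2000 (external call), 0x4000 (emergency signal),
 * 0x800 (clock comparator), 0x400 (CPU timer) and 0x200 (service signal).
 * The same CR0 bits are consulted again in deliverable_local_irqs().
 */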
static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
						   struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_CLOCK_COMP:
		return ckc_interrupts_enabled(vcpu);
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}

static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.local_int.pending_irqs;
}

static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask = pending_local_irqs(vcpu);

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}
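
/*
 * Idle bookkeeping: CPUSTAT_WAIT is visible to the SIE control block,
 * while float_int->idle_mask is what __inject_vm() scans to pick an idle
 * target vcpu for a freshly queued floating interrupt.
 */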
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable local interrupts */
static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}

static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return insn_length(vcpu->arch.sie_block->ipa >> 8);
	case ICPT_PROGI:
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}
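
/*
 * The __deliver_* helpers below all follow the architected interruption
 * pattern: store the interruption parameters and the current (old) PSW
 * into the guest lowcore, then load the new PSW from the lowcore. A
 * failing guest access on any step makes the delivery fail with -EFAULT.
 */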
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
		   0, ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk;
	unsigned long adtl_status_addr;
	int rc;

	spin_lock(&li->lock);
	mchk = li->irq.mchk;
	/*
	 * If there was an exigent machine check pending, then any repressible
	 * machine checks that might have been pending are indicated along
	 * with it, so always clear both bits.
	 */
	clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	memset(&li->irq.mchk, 0, sizeof(mchk));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
		   mchk.mcic);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
					 mchk.cr14, mchk.mcic);

	rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
	rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
			    &adtl_status_addr, sizeof(unsigned long));
	rc |= kvm_s390_vcpu_store_adtl_status(vcpu, adtl_status_addr);
	rc |= put_guest_lc(vcpu, mchk.mcic,
			   (u64 __user *) __LC_MCCK_CODE);
	rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
			     &mchk.fixed_logout, sizeof(mchk.fixed_logout));
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct _lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int cpu_addr;
	int rc;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilc = get_ilc(vcpu);

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
		   pgm_info.code, ilc);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
		kvm_s390_rewind_psw(vcpu, ilc);

	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
					  struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
		   inti->ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
					      struct kvm_s390_interrupt_info *inti)
{
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_DONE, 0,
					 inti->ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
					 struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
		   inti->ext.ext_params, inti->ext.ext_params2);
	vcpu->stat.deliver_virtio_interrupt++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params,
					 inti->ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
	vcpu->stat.deliver_io_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 ((__u32)inti->io.subchannel_id << 16) |
						inti->io.subchannel_nr,
					 ((__u64)inti->io.io_int_parm << 32) |
						inti->io.io_int_word);

	rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
			   (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
			   (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
			   (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, inti->io.io_int_word,
			   (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_mchk_info *mchk = &inti->mchk;
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
		   mchk->mcic);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
					 mchk->cr14, mchk->mcic);

	rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
	rc |= put_guest_lc(vcpu, mchk->mcic,
			   (u64 __user *) __LC_MCCK_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
			     &mchk->fixed_logout, sizeof(mchk->fixed_logout));
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
};
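
/*
 * The table above is indexed by IRQ_PEND_* bit number. Since
 * kvm_s390_deliver_pending_interrupts() walks the pending mask with
 * find_first_bit(), the bit order doubles as the delivery priority order.
 */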

static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	int rc;

	switch (inti->type) {
	case KVM_S390_INT_SERVICE:
		rc = __deliver_service(vcpu, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __deliver_pfault_done(vcpu, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __deliver_virtio(vcpu, inti);
		break;
	case KVM_S390_MCHK:
		rc = __deliver_mchk_floating(vcpu, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __deliver_io(vcpu, inti);
		break;
	default:
		BUG();
	}

	return rc;
}

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

	if (!sclp_has_sigpif())
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return (sigp_ctrl & SIGP_CTRL_C) &&
	       (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	rc = !!deliverable_local_irqs(vcpu);

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = 1;

	/* external call pending and deliverable */
	if (!rc && kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		rc = 1;

	if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		rc = 1;

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ckc <
	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		return 0;
	if (!ckc_interrupts_enabled(vcpu))
		return 0;
	return 1;
}
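
/*
 * Enabled wait: the vcpu blocks until an interrupt becomes pending or,
 * when the clock comparator is armed, until the hrtimer programmed below
 * fires. Disabled wait is not handled here and is reported to userspace
 * via -EOPNOTSUPP.
 */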
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/* underflow */
	if (vcpu->arch.sie_block->ckc < now)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 now, sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (vcpu->arch.sie_block->ckc > now &&
	    hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}

int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	deliver_irq_t func;
	int deliver;
	int rc = 0;
	unsigned long irq_type;
	unsigned long deliverable_irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (kvm_cpu_has_pending_timer(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	do {
		deliverable_irqs = deliverable_local_irqs(vcpu);
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
		if (irq_type == IRQ_PEND_COUNT)
			break;
		func = deliver_irq_funcs[irq_type];
		if (!func) {
			WARN_ON_ONCE(func == NULL);
			clear_bit(irq_type, &li->pending_irqs);
			continue;
		}
		rc = func(vcpu);
	} while (!rc && irq_type != IRQ_PEND_COUNT);

	set_intercept_indicators_local(vcpu);

	if (!rc && atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				rc = __deliver_floating_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (!rc && deliver);
	}

	return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	li->irq.pgm = irq->u.pgm;
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq irq;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
				   0, 1);
	spin_lock(&li->lock);
	irq.u.pgm.code = code;
	__inject_prog(vcpu, &irq);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return 0;
}

int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq irq;
	int rc;

	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
		   pgm_info->code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   pgm_info->code, 0, 1);
	spin_lock(&li->lock);
	irq.u.pgm = *pgm_info;
	rc = __inject_prog(vcpu, &irq);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return rc;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
		   irq->u.ext.ext_params, irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2, 2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
{
	unsigned char new_val, old_val;
	uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

	new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
	old_val = *sigp_ctrl & ~SIGP_CTRL_C;
	if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
		/* another external call is pending */
		return -EBUSY;
	}
	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	return 0;
}
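
/*
 * With the SIGP interpretation facility the external-call state lives in
 * the SCA's sigp_ctrl byte, so the cmpxchg() above claims it atomically
 * against concurrent senders; losing the race means another external call
 * is already pending, hence the -EBUSY.
 */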
static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0, 2);

	/* sending vcpu invalid */
	if (src_id >= KVM_MAX_VCPUS ||
	    kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp_has_sigpif())
		return __inject_extcall_sigpif(vcpu, src_id);

	/* if the bit was already set, an external call is already pending */
	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0, 2);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0, 2);

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic, 2);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check.
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0, 2);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0, 2);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}
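
/*
 * Dequeue and return a queued I/O interrupt matching either the ISC bits
 * enabled in cr6 or an explicit subchannel id/nr packed into schid;
 * exactly one of the two selectors must be supplied, as the first check
 * below enforces.
 */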
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti) {
		list_del_init(&inti->list);
		fi->irq_count--;
	}
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	return inti;
}

static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *iter;
	struct kvm_vcpu *dst_vcpu = NULL;
	int sigcpu;
	int rc = 0;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
		rc = -EINVAL;
		goto unlock_fi;
	}
	fi->irq_count++;
	if (!is_ioint(inti->type)) {
		list_add_tail(&inti->list, &fi->list);
	} else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	if (atomic_read(&kvm->online_vcpus) == 0)
		goto unlock_fi;
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (inti->type) {
	case KVM_S390_MCHK:
		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
	spin_unlock(&fi->lock);
	return rc;
}
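
/*
 * Translate the legacy kvm_s390_interrupt layout into a floating
 * interrupt. For I/O interrupts the packing, as unpacked below, is
 * (illustrative only):
 *   parm   = (subchannel_id << 16) | subchannel_nr;
 *   parm64 = ((u64)io_int_parm << 32) | io_int_word;
 */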
int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}

int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   irq->u.pgm.code);
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_for_each_entry_safe(inti, n, &fi->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	fi->irq_count = 0;
	atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
}

static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
				   u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	struct kvm_s390_irq irq = {0};

	irq.type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		irq.u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq.u.io = inti->io;
		break;
	case KVM_S390_MCHK:
		irq.u.mchk = inti->mchk;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uptr, &irq, sizeof(irq)))
		return -EFAULT;

	return 0;
}

static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	int ret = 0;
	int n = 0;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);

	list_for_each_entry(inti, &fi->list, list) {
		if (len < sizeof(struct kvm_s390_irq)) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			break;
		}
		ret = copy_irq_to_user(inti, (u64) buf);
		if (ret)
			break;
		buf += sizeof(struct kvm_s390_irq);
		len -= sizeof(struct kvm_s390_irq);
		n++;
	}

	spin_unlock(&fi->lock);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}

		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}
static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};
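
/*
 * Adapter indicators are guest-defined bit strings. get_ind_bit() below
 * computes the bit number within the pinned page, optionally applying the
 * big-endian bit swap (bit ^ (BITS_PER_LONG - 1)) requested by the
 * adapter's swap flag.
 */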
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * Returns:
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)
{
	return -EINVAL;
}
)