KVM: s390: fix delivery of vector regs during machine checks
arch/s390/kvm/interrupt.c

/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 0;

	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}

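/*
 * Flag an external call in the target VCPU's SIGP control block, so that
 * the SIE instruction can inject it itself; returns -EBUSY if the cmpxchg
 * finds another external call already flagged.
 */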
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc, expect;

	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
		return 0;
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static inline int is_ioirq(unsigned long irq_type)
{
	return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
		(irq_type <= IRQ_PEND_IO_ISC_7));
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
	       vcpu->arch.local_int.pending_irqs;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

	return active_mask;
}

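/*
 * Narrow the set of pending interrupts down to those that are currently
 * deliverable, honoring the PSW system mask and the subclass masks in
 * the control registers.
 */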
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	if (!(vcpu->arch.sie_block->gcr[14] &
	      vcpu->kvm->arch.float_int.mchk.cr14))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		      &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

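/*
 * The __deliver_* helpers below emulate hardware interruption delivery:
 * they store the interruption parameters into the guest lowcore and then
 * swap the old/new PSWs.
 */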
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			  (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

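/*
 * Write the machine-check interruption information to the guest lowcore
 * and save areas.  Vector registers go to the extended save area only if
 * MCIC.VR is on, the save-area address is usable and the guest has the
 * vector facility (129); otherwise the VR validity bit is cleared so the
 * guest cannot consume stale register contents.
 */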
static int __write_machine_check(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mchk_info *mchk)
{
	unsigned long ext_sa_addr;
	freg_t fprs[NUM_FPRS];
	union mci mci;
	int rc;

	mci.val = mchk->mcic;
	/* take care of lazy register loading via vcpu load/put */
	save_fpu_regs();
	save_access_regs(vcpu->run->s.regs.acrs);

	/* Extended save area */
	rc = read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR, &ext_sa_addr,
			   sizeof(unsigned long));
	/* Only bits 0-53 are used for address formation */
	ext_sa_addr &= ~0x3ffUL;
	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
				    512))
			mci.vr = 0;
	} else {
		mci.vr = 0;
	}

	/* General interruption information */
	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

	/* Register-save areas */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
	} else {
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
			     vcpu->run->s.regs.gprs, 128);
	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
			     &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
			     &vcpu->arch.sie_block->gcr, 128);

	/* Extended interruption information */
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
			     sizeof(mchk->fixed_logout));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);
		rc = __write_machine_check(vcpu, &mchk);
	}
	return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc = write_guest_lc(vcpu,
			    offsetof(struct lowcore, restart_old_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;
	int cpu_addr;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

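/*
 * Deliver a program interrupt: store the exception-specific fields that
 * match the interruption code, rewind the PSW for nullifying conditions
 * (unless rewinding was explicitly suppressed) and perform the PSW swap.
 */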
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				inti->ext.ext_params,
				inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
		else
			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
				   inti->io.subchannel_id >> 8,
				   inti->io.subchannel_id >> 1 & 0x3,
				   inti->io.subchannel_nr);

		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
				inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
				inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = put_guest_lc(vcpu, inti->io.subchannel_id,
				  (u16 *)__LC_SUBCHANNEL_ID);
		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
				   (u16 *)__LC_SUBCHANNEL_NR);
		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
				   (u32 *)__LC_IO_INT_PARM);
		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
				   (u32 *)__LC_IO_INT_WORD);
		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		kfree(inti);
	}

	return rc ? -EFAULT : 0;
}

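/*
 * Delivery callbacks indexed by IRQ_PEND_* bit number; I/O interrupts
 * are dispatched separately through __deliver_io().
 */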
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
	[IRQ_PEND_EXT_SERVICE]    = __deliver_service,
	[IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
	[IRQ_PEND_VIRTIO]         = __deliver_virtio,
};

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

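/*
 * Compute how many nanoseconds the VCPU may sleep before the clock
 * comparator or the CPU timer would fire; 0 means a timer condition
 * is already due.
 */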
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	u64 now, cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
		/* already expired or overflow? */
		if (!sltime || vcpu->arch.sie_block->ckc <= now)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu) &&
	    !cpu_timer_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	sltime = __calculate_sltime(vcpu);
	if (!sltime)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	/*
	 * We cannot move this into the if, as the CPU might be already
	 * in kvm_vcpu_block without having the waitqueue set (polling)
	 */
	vcpu->valid_wakeup = true;
	if (swait_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		swake_up(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
	/*
	 * The VCPU might not be sleeping but is executing the VSIE. Let's
	 * kick it, so it leaves the SIE to process the request.
	 */
	kvm_s390_vsie_kick(vcpu);
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	sltime = __calculate_sltime(vcpu);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	sca_clear_ext_call(vcpu);
}

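/*
 * Deliver everything that is both pending and deliverable, in priority
 * order, then request intercepts for whatever remains undeliverable.
 */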
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	deliver_irq_t func;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
		if (is_ioirq(irq_type)) {
			rc = __deliver_io(vcpu, irq_type);
		} else {
			func = deliver_irq_funcs[irq_type];
			if (!func) {
				WARN_ON_ONCE(func == NULL);
				clear_bit(irq_type, &li->pending_irqs);
				continue;
			}
			rc = func(vcpu);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
		/* auto detection if no valid ILC was given */
		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
	}

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

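/*
 * Dequeue the first I/O interrupt queued for the given ISC, optionally
 * restricted to a specific subchannel id/nr (schid != 0).
 */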
static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  int isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}

#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts one after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_virtio(struct kvm *kvm,
			   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int __inject_pfault_done(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
			       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	if (inti->type & KVM_S390_INT_IO_AI_MASK)
		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
	else
		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
			 inti->io.subchannel_id >> 8,
			 inti->io.subchannel_id >> 1 & 0x3,
			 inti->io.subchannel_nr);
	isc = int_word_to_isc(inti->io.io_int_word);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (type) {
	case KVM_S390_MCHK:
		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(dst_vcpu);
}

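/*
 * Queue a floating interrupt of the given type and kick a suitable
 * destination VCPU so that it picks the interrupt up.
 */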
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	u64 type = READ_ONCE(inti->type);
	int rc;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}

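/*
 * Entry point for floating-interrupt injection with the legacy
 * kvm_s390_interrupt layout (as passed by userspace, e.g. through the
 * KVM_S390_INTERRUPT ioctl): translate it into an interrupt-info
 * structure and queue it.
 */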
1564int kvm_s390_inject_vm(struct kvm *kvm,
1565 struct kvm_s390_interrupt *s390int)
1566{
1567 struct kvm_s390_interrupt_info *inti;
428d53be 1568 int rc;
c05c4186 1569
ba5c1e9b
CO
1570 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1571 if (!inti)
1572 return -ENOMEM;
1573
c05c4186
JF
1574 inti->type = s390int->type;
1575 switch (inti->type) {
ba5c1e9b 1576 case KVM_S390_INT_VIRTIO:
33e19115 1577 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
ba5c1e9b 1578 s390int->parm, s390int->parm64);
ba5c1e9b
CO
1579 inti->ext.ext_params = s390int->parm;
1580 inti->ext.ext_params2 = s390int->parm64;
1581 break;
1582 case KVM_S390_INT_SERVICE:
3f24ba15 1583 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
ba5c1e9b
CO
1584 inti->ext.ext_params = s390int->parm;
1585 break;
3c038e6b 1586 case KVM_S390_INT_PFAULT_DONE:
3c038e6b
DD
1587 inti->ext.ext_params2 = s390int->parm64;
1588 break;
48a3e950 1589 case KVM_S390_MCHK:
3f24ba15 1590 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
48a3e950 1591 s390int->parm64);
48a3e950
CH
1592 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1593 inti->mchk.mcic = s390int->parm64;
1594 break;
d8346b7d 1595 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
d8346b7d
CH
1596 inti->io.subchannel_id = s390int->parm >> 16;
1597 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1598 inti->io.io_int_parm = s390int->parm64 >> 32;
1599 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1600 break;
ba5c1e9b
CO
1601 default:
1602 kfree(inti);
1603 return -EINVAL;
1604 }
ade38c31
CH
1605 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1606 2);
ba5c1e9b 1607
428d53be
DH
1608 rc = __inject_vm(kvm, inti);
1609 if (rc)
1610 kfree(inti);
1611 return rc;
ba5c1e9b
CO
1612}
1613
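/*
 * A minimal userspace sketch (vm_fd is assumed to come from
 * KVM_CREATE_VM and the parameter value is made up): floating
 * interrupts reach kvm_s390_inject_vm() above via the
 * KVM_S390_INTERRUPT ioctl on the VM file descriptor.
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_SERVICE,
 *		.parm = 0x00080001,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int) < 0)
 *		perror("KVM_S390_INTERRUPT");
 */
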
15462e37 1614int kvm_s390_reinject_io_int(struct kvm *kvm,
2f32d4ea
CH
1615 struct kvm_s390_interrupt_info *inti)
1616{
15462e37 1617 return __inject_vm(kvm, inti);
2f32d4ea
CH
1618}
1619
383d0b05
JF
1620int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1621 struct kvm_s390_irq *irq)
1622{
1623 irq->type = s390int->type;
1624 switch (irq->type) {
1625 case KVM_S390_PROGRAM_INT:
1626 if (s390int->parm & 0xffff0000)
1627 return -EINVAL;
1628 irq->u.pgm.code = s390int->parm;
1629 break;
1630 case KVM_S390_SIGP_SET_PREFIX:
1631 irq->u.prefix.address = s390int->parm;
1632 break;
2822545f
DH
1633 case KVM_S390_SIGP_STOP:
1634 irq->u.stop.flags = s390int->parm;
1635 break;
383d0b05 1636 case KVM_S390_INT_EXTERNAL_CALL:
94d1f564 1637 if (s390int->parm & 0xffff0000)
383d0b05
JF
1638 return -EINVAL;
1639 irq->u.extcall.code = s390int->parm;
1640 break;
1641 case KVM_S390_INT_EMERGENCY:
94d1f564 1642 if (s390int->parm & 0xffff0000)
383d0b05
JF
1643 return -EINVAL;
1644 irq->u.emerg.code = s390int->parm;
1645 break;
1646 case KVM_S390_MCHK:
1647 irq->u.mchk.mcic = s390int->parm64;
1648 break;
1649 }
1650 return 0;
1651}
1652
6cddd432
DH
1653int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1654{
1655 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1656
1657 return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1658}
1659
1660void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1661{
1662 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1663
1664 spin_lock(&li->lock);
1665 li->irq.stop.flags = 0;
1666 clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1667 spin_unlock(&li->lock);
1668}
1669
79e87a10 1670static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
ba5c1e9b 1671{
0146a7b0 1672 int rc;
ba5c1e9b 1673
383d0b05 1674 switch (irq->type) {
ba5c1e9b 1675 case KVM_S390_PROGRAM_INT:
383d0b05 1676 rc = __inject_prog(vcpu, irq);
ba5c1e9b 1677 break;
b7e6e4d3 1678 case KVM_S390_SIGP_SET_PREFIX:
383d0b05 1679 rc = __inject_set_prefix(vcpu, irq);
b7e6e4d3 1680 break;
ba5c1e9b 1681 case KVM_S390_SIGP_STOP:
383d0b05 1682 rc = __inject_sigp_stop(vcpu, irq);
0146a7b0 1683 break;
ba5c1e9b 1684 case KVM_S390_RESTART:
383d0b05 1685 rc = __inject_sigp_restart(vcpu, irq);
0146a7b0 1686 break;
e029ae5b 1687 case KVM_S390_INT_CLOCK_COMP:
383d0b05 1688 rc = __inject_ckc(vcpu);
0146a7b0 1689 break;
e029ae5b 1690 case KVM_S390_INT_CPU_TIMER:
383d0b05 1691 rc = __inject_cpu_timer(vcpu);
82a12737 1692 break;
7697e71f 1693 case KVM_S390_INT_EXTERNAL_CALL:
383d0b05 1694 rc = __inject_extcall(vcpu, irq);
82a12737 1695 break;
ba5c1e9b 1696 case KVM_S390_INT_EMERGENCY:
383d0b05 1697 rc = __inject_sigp_emergency(vcpu, irq);
ba5c1e9b 1698 break;
48a3e950 1699 case KVM_S390_MCHK:
383d0b05 1700 rc = __inject_mchk(vcpu, irq);
48a3e950 1701 break;
3c038e6b 1702 case KVM_S390_INT_PFAULT_INIT:
383d0b05 1703 rc = __inject_pfault_init(vcpu, irq);
3c038e6b 1704 break;
ba5c1e9b
CO
1705 case KVM_S390_INT_VIRTIO:
1706 case KVM_S390_INT_SERVICE:
d8346b7d 1707 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
ba5c1e9b 1708 default:
0146a7b0 1709 rc = -EINVAL;
ba5c1e9b 1710 }
79e87a10
JF
1711
1712 return rc;
1713}
1714
1715int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1716{
1717 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1718 int rc;
1719
1720 spin_lock(&li->lock);
1721 rc = do_inject_vcpu(vcpu, irq);
4ae3c081 1722 spin_unlock(&li->lock);
0146a7b0
JF
1723 if (!rc)
1724 kvm_s390_vcpu_wakeup(vcpu);
0146a7b0 1725 return rc;
ba5c1e9b 1726}
c05c4186 1727
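/*
 * The local counterpart from userspace, as a sketch (vcpu_fd is
 * assumed to come from KVM_CREATE_VCPU; the emergency code is a
 * made-up signalling CPU address): kvm_s390_inject_vcpu() is reached
 * via the KVM_S390_IRQ vcpu ioctl, which takes a struct kvm_s390_irq
 * directly, while the older KVM_S390_INTERRUPT vcpu path first
 * converts via s390int_to_s390irq() above.
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_EMERGENCY,
 *		.u.emerg.code = 1,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0)
 *		perror("KVM_S390_IRQ");
 */
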
6d3da241 1728static inline void clear_irq_list(struct list_head *_list)
c05c4186 1729{
6d3da241 1730 struct kvm_s390_interrupt_info *inti, *n;
c05c4186 1731
6d3da241 1732 list_for_each_entry_safe(inti, n, _list, list) {
c05c4186
JF
1733 list_del(&inti->list);
1734 kfree(inti);
1735 }
c05c4186
JF
1736}
1737
94aa033e
JF
1738static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
1739 struct kvm_s390_irq *irq)
c05c4186 1740{
94aa033e 1741 irq->type = inti->type;
c05c4186 1742 switch (inti->type) {
3c038e6b
DD
1743 case KVM_S390_INT_PFAULT_INIT:
1744 case KVM_S390_INT_PFAULT_DONE:
c05c4186 1745 case KVM_S390_INT_VIRTIO:
94aa033e 1746 irq->u.ext = inti->ext;
c05c4186
JF
1747 break;
1748 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
94aa033e 1749 irq->u.io = inti->io;
c05c4186 1750 break;
c05c4186 1751 }
c05c4186
JF
1752}
1753
6d3da241
JF
1754void kvm_s390_clear_float_irqs(struct kvm *kvm)
1755{
1756 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1757 int i;
1758
1759 spin_lock(&fi->lock);
f2ae45ed
JF
1760 fi->pending_irqs = 0;
1761 memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
1762 memset(&fi->mchk, 0, sizeof(fi->mchk));
6d3da241
JF
1763 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1764 clear_irq_list(&fi->lists[i]);
1765 for (i = 0; i < FIRQ_MAX_COUNT; i++)
1766 fi->counters[i] = 0;
1767 spin_unlock(&fi->lock);
 1768}
1769
94aa033e 1770static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
c05c4186
JF
1771{
1772 struct kvm_s390_interrupt_info *inti;
1773 struct kvm_s390_float_interrupt *fi;
94aa033e 1774 struct kvm_s390_irq *buf;
6d3da241 1775 struct kvm_s390_irq *irq;
94aa033e 1776 int max_irqs;
c05c4186
JF
1777 int ret = 0;
1778 int n = 0;
6d3da241 1779 int i;
c05c4186 1780
94aa033e
JF
1781 if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
1782 return -EINVAL;
1783
1784 /*
 1785 * We are already using -ENOMEM to signal
 1786 * userspace that it may retry with a bigger buffer,
 1787 * so we need to use something else for this case.
1788 */
1789 buf = vzalloc(len);
1790 if (!buf)
1791 return -ENOBUFS;
1792
1793 max_irqs = len / sizeof(struct kvm_s390_irq);
1794
c05c4186
JF
1795 fi = &kvm->arch.float_int;
1796 spin_lock(&fi->lock);
6d3da241
JF
1797 for (i = 0; i < FIRQ_LIST_COUNT; i++) {
1798 list_for_each_entry(inti, &fi->lists[i], list) {
1799 if (n == max_irqs) {
1800 /* signal userspace to try again */
1801 ret = -ENOMEM;
1802 goto out;
1803 }
1804 inti_to_irq(inti, &buf[n]);
1805 n++;
1806 }
1807 }
1808 if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
94aa033e 1809 if (n == max_irqs) {
c05c4186
JF
1810 /* signal userspace to try again */
1811 ret = -ENOMEM;
6d3da241 1812 goto out;
c05c4186 1813 }
6d3da241
JF
1814 irq = (struct kvm_s390_irq *) &buf[n];
1815 irq->type = KVM_S390_INT_SERVICE;
1816 irq->u.ext = fi->srv_signal;
c05c4186
JF
1817 n++;
1818 }
6d3da241
JF
1819 if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
1820 if (n == max_irqs) {
1821 /* signal userspace to try again */
1822 ret = -ENOMEM;
1823 goto out;
1824 }
1825 irq = (struct kvm_s390_irq *) &buf[n];
1826 irq->type = KVM_S390_MCHK;
1827 irq->u.mchk = fi->mchk;
1828 n++;
 1829 }
1830
1831out:
c05c4186 1832 spin_unlock(&fi->lock);
94aa033e
JF
1833 if (!ret && n > 0) {
1834 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
1835 ret = -EFAULT;
1836 }
1837 vfree(buf);
c05c4186
JF
1838
1839 return ret < 0 ? ret : n;
1840}
1841
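/*
 * The retry protocol from userspace's point of view, as a sketch
 * (flic_fd is assumed to come from KVM_CREATE_DEVICE with
 * KVM_DEV_TYPE_FLIC; allocation error handling is trimmed): a
 * positive return value is the number of interrupts copied, -ENOMEM
 * asks for a bigger buffer, and -ENOBUFS reports the kernel-internal
 * allocation failure distinguished above.
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_GET_ALL_IRQS,
 *		.attr  = len,
 *		.addr  = (__u64)(unsigned long)buf,
 *	};
 *	int n;
 *
 *	while ((n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr)) < 0 &&
 *	       errno == ENOMEM && len < KVM_S390_FLIC_MAX_BUFFER) {
 *		len *= 2;
 *		buf = realloc(buf, len);
 *		attr.attr = len;
 *		attr.addr = (__u64)(unsigned long)buf;
 *	}
 */
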
1842static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1843{
1844 int r;
1845
1846 switch (attr->group) {
1847 case KVM_DEV_FLIC_GET_ALL_IRQS:
94aa033e 1848 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
c05c4186
JF
1849 attr->attr);
1850 break;
1851 default:
1852 r = -EINVAL;
1853 }
1854
1855 return r;
1856}
1857
1858static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
1859 u64 addr)
1860{
1861 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1862 void *target = NULL;
1863 void __user *source;
1864 u64 size;
1865
1866 if (get_user(inti->type, (u64 __user *)addr))
1867 return -EFAULT;
1868
1869 switch (inti->type) {
3c038e6b
DD
1870 case KVM_S390_INT_PFAULT_INIT:
1871 case KVM_S390_INT_PFAULT_DONE:
c05c4186
JF
1872 case KVM_S390_INT_VIRTIO:
1873 case KVM_S390_INT_SERVICE:
1874 target = (void *) &inti->ext;
1875 source = &uptr->u.ext;
1876 size = sizeof(inti->ext);
1877 break;
1878 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1879 target = (void *) &inti->io;
1880 source = &uptr->u.io;
1881 size = sizeof(inti->io);
1882 break;
1883 case KVM_S390_MCHK:
1884 target = (void *) &inti->mchk;
1885 source = &uptr->u.mchk;
1886 size = sizeof(inti->mchk);
1887 break;
1888 default:
1889 return -EINVAL;
1890 }
1891
1892 if (copy_from_user(target, source, size))
1893 return -EFAULT;
1894
1895 return 0;
1896}
1897
1898static int enqueue_floating_irq(struct kvm_device *dev,
1899 struct kvm_device_attr *attr)
1900{
1901 struct kvm_s390_interrupt_info *inti = NULL;
1902 int r = 0;
1903 int len = attr->attr;
1904
1905 if (len % sizeof(struct kvm_s390_irq) != 0)
1906 return -EINVAL;
1907 else if (len > KVM_S390_FLIC_MAX_BUFFER)
1908 return -EINVAL;
1909
1910 while (len >= sizeof(struct kvm_s390_irq)) {
1911 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1912 if (!inti)
1913 return -ENOMEM;
1914
1915 r = copy_irq_from_user(inti, attr->addr);
1916 if (r) {
1917 kfree(inti);
1918 return r;
1919 }
a91b8ebe
JF
1920 r = __inject_vm(dev->kvm, inti);
1921 if (r) {
1922 kfree(inti);
1923 return r;
1924 }
c05c4186
JF
1925 len -= sizeof(struct kvm_s390_irq);
1926 attr->addr += sizeof(struct kvm_s390_irq);
1927 }
1928
1929 return r;
1930}
1931
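/*
 * The matching injection side, KVM_DEV_FLIC_ENQUEUE, takes an array of
 * struct kvm_s390_irq with the byte length in attr->attr. A
 * single-element sketch (flic_fd and the token value are assumptions):
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.u.ext.ext_params2 = 0xcafeULL,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ENQUEUE,
 *		.attr  = sizeof(irq),
 *		.addr  = (__u64)(unsigned long)&irq,
 *	};
 *
 *	if (ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
 *		perror("KVM_DEV_FLIC_ENQUEUE");
 */
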
841b91c5
CH
1932static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1933{
1934 if (id >= MAX_S390_IO_ADAPTERS)
1935 return NULL;
1936 return kvm->arch.adapters[id];
1937}
1938
1939static int register_io_adapter(struct kvm_device *dev,
1940 struct kvm_device_attr *attr)
1941{
1942 struct s390_io_adapter *adapter;
1943 struct kvm_s390_io_adapter adapter_info;
1944
1945 if (copy_from_user(&adapter_info,
1946 (void __user *)attr->addr, sizeof(adapter_info)))
1947 return -EFAULT;
1948
1949 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1950 (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1951 return -EINVAL;
1952
1953 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1954 if (!adapter)
1955 return -ENOMEM;
1956
1957 INIT_LIST_HEAD(&adapter->maps);
1958 init_rwsem(&adapter->maps_lock);
1959 atomic_set(&adapter->nr_maps, 0);
1960 adapter->id = adapter_info.id;
1961 adapter->isc = adapter_info.isc;
1962 adapter->maskable = adapter_info.maskable;
1963 adapter->masked = false;
1964 adapter->swap = adapter_info.swap;
1965 dev->kvm->arch.adapters[adapter->id] = adapter;
1966
1967 return 0;
1968}
1969
1970int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1971{
1972 int ret;
1973 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1974
1975 if (!adapter || !adapter->maskable)
1976 return -EINVAL;
1977 ret = adapter->masked;
1978 adapter->masked = masked;
1979 return ret;
1980}
1981
1982static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
1983{
1984 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1985 struct s390_map_info *map;
1986 int ret;
1987
1988 if (!adapter || !addr)
1989 return -EINVAL;
1990
1991 map = kzalloc(sizeof(*map), GFP_KERNEL);
1992 if (!map) {
1993 ret = -ENOMEM;
1994 goto out;
1995 }
1996 INIT_LIST_HEAD(&map->list);
1997 map->guest_addr = addr;
6e0a0431 1998 map->addr = gmap_translate(kvm->arch.gmap, addr);
841b91c5
CH
1999 if (map->addr == -EFAULT) {
2000 ret = -EFAULT;
2001 goto out;
2002 }
2003 ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
2004 if (ret < 0)
2005 goto out;
2006 BUG_ON(ret != 1);
2007 down_write(&adapter->maps_lock);
2008 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
2009 list_add_tail(&map->list, &adapter->maps);
2010 ret = 0;
2011 } else {
2012 put_page(map->page);
2013 ret = -EINVAL;
2014 }
2015 up_write(&adapter->maps_lock);
2016out:
2017 if (ret)
2018 kfree(map);
2019 return ret;
2020}
2021
2022static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
2023{
2024 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2025 struct s390_map_info *map, *tmp;
2026 int found = 0;
2027
2028 if (!adapter || !addr)
2029 return -EINVAL;
2030
2031 down_write(&adapter->maps_lock);
2032 list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
2033 if (map->guest_addr == addr) {
2034 found = 1;
2035 atomic_dec(&adapter->nr_maps);
2036 list_del(&map->list);
2037 put_page(map->page);
2038 kfree(map);
2039 break;
2040 }
2041 }
2042 up_write(&adapter->maps_lock);
2043
2044 return found ? 0 : -EINVAL;
2045}
2046
2047void kvm_s390_destroy_adapters(struct kvm *kvm)
2048{
2049 int i;
2050 struct s390_map_info *map, *tmp;
2051
2052 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
2053 if (!kvm->arch.adapters[i])
2054 continue;
2055 list_for_each_entry_safe(map, tmp,
2056 &kvm->arch.adapters[i]->maps, list) {
2057 list_del(&map->list);
2058 put_page(map->page);
2059 kfree(map);
2060 }
2061 kfree(kvm->arch.adapters[i]);
2062 }
2063}
2064
2065static int modify_io_adapter(struct kvm_device *dev,
2066 struct kvm_device_attr *attr)
2067{
2068 struct kvm_s390_io_adapter_req req;
2069 struct s390_io_adapter *adapter;
2070 int ret;
2071
2072 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2073 return -EFAULT;
2074
2075 adapter = get_io_adapter(dev->kvm, req.id);
2076 if (!adapter)
2077 return -EINVAL;
2078 switch (req.type) {
2079 case KVM_S390_IO_ADAPTER_MASK:
2080 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2081 if (ret > 0)
2082 ret = 0;
2083 break;
2084 case KVM_S390_IO_ADAPTER_MAP:
2085 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
2086 break;
2087 case KVM_S390_IO_ADAPTER_UNMAP:
2088 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
2089 break;
2090 default:
2091 ret = -EINVAL;
2092 }
2093
2094 return ret;
2095}
2096
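/*
 * Driving modify_io_adapter() from userspace, as a sketch (it assumes
 * an adapter with id 0 was registered beforehand via
 * KVM_DEV_FLIC_ADAPTER_REGISTER, and ind_gpa is a guest address of an
 * indicator page):
 *
 *	struct kvm_s390_io_adapter_req req = {
 *		.id   = 0,
 *		.type = KVM_S390_IO_ADAPTER_MAP,
 *		.addr = ind_gpa,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ADAPTER_MODIFY,
 *		.addr  = (__u64)(unsigned long)&req,
 *	};
 *
 *	if (ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
 *		perror("KVM_DEV_FLIC_ADAPTER_MODIFY");
 */
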
6d28f789
HP
 2097static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
 2099{
2100 const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2101 u32 schid;
2102
2103 if (attr->flags)
2104 return -EINVAL;
2105 if (attr->attr != sizeof(schid))
2106 return -EINVAL;
2107 if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2108 return -EFAULT;
2109 kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2110 /*
2111 * If userspace is conforming to the architecture, we can have at most
2112 * one pending I/O interrupt per subchannel, so this is effectively a
2113 * clear all.
2114 */
2115 return 0;
2116}
2117
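/*
 * Usage sketch (the schid value is made up): the payload is a single
 * u32 naming the subchannel whose pending I/O interrupt, if any,
 * should be dropped.
 *
 *	__u32 schid = 0x00010000;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_CLEAR_IO_IRQ,
 *		.attr  = sizeof(schid),
 *		.addr  = (__u64)(unsigned long)&schid,
 *	};
 *
 *	if (ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
 *		perror("KVM_DEV_FLIC_CLEAR_IO_IRQ");
 */
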
c05c4186
JF
2118static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2119{
2120 int r = 0;
3c038e6b
DD
2121 unsigned int i;
2122 struct kvm_vcpu *vcpu;
c05c4186
JF
2123
2124 switch (attr->group) {
2125 case KVM_DEV_FLIC_ENQUEUE:
2126 r = enqueue_floating_irq(dev, attr);
2127 break;
2128 case KVM_DEV_FLIC_CLEAR_IRQS:
67335e63 2129 kvm_s390_clear_float_irqs(dev->kvm);
c05c4186 2130 break;
3c038e6b
DD
2131 case KVM_DEV_FLIC_APF_ENABLE:
2132 dev->kvm->arch.gmap->pfault_enabled = 1;
2133 break;
2134 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2135 dev->kvm->arch.gmap->pfault_enabled = 0;
2136 /*
 2137 * Make sure no async faults are in transition when
 2138 * clearing the queues, so we don't need to worry
 2139 * about late-arriving workers.
2140 */
2141 synchronize_srcu(&dev->kvm->srcu);
2142 kvm_for_each_vcpu(i, vcpu, dev->kvm)
2143 kvm_clear_async_pf_completion_queue(vcpu);
2144 break;
841b91c5
CH
2145 case KVM_DEV_FLIC_ADAPTER_REGISTER:
2146 r = register_io_adapter(dev, attr);
2147 break;
2148 case KVM_DEV_FLIC_ADAPTER_MODIFY:
2149 r = modify_io_adapter(dev, attr);
2150 break;
6d28f789
HP
2151 case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2152 r = clear_io_irq(dev->kvm, attr);
2153 break;
c05c4186
JF
2154 default:
2155 r = -EINVAL;
2156 }
2157
2158 return r;
2159}
2160
4f129858
HP
2161static int flic_has_attr(struct kvm_device *dev,
2162 struct kvm_device_attr *attr)
2163{
2164 switch (attr->group) {
2165 case KVM_DEV_FLIC_GET_ALL_IRQS:
2166 case KVM_DEV_FLIC_ENQUEUE:
2167 case KVM_DEV_FLIC_CLEAR_IRQS:
2168 case KVM_DEV_FLIC_APF_ENABLE:
2169 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2170 case KVM_DEV_FLIC_ADAPTER_REGISTER:
2171 case KVM_DEV_FLIC_ADAPTER_MODIFY:
6d28f789 2172 case KVM_DEV_FLIC_CLEAR_IO_IRQ:
4f129858
HP
2173 return 0;
2174 }
2175 return -ENXIO;
2176}
2177
c05c4186
JF
2178static int flic_create(struct kvm_device *dev, u32 type)
2179{
2180 if (!dev)
2181 return -EINVAL;
2182 if (dev->kvm->arch.flic)
2183 return -EINVAL;
2184 dev->kvm->arch.flic = dev;
2185 return 0;
2186}
2187
2188static void flic_destroy(struct kvm_device *dev)
2189{
2190 dev->kvm->arch.flic = NULL;
2191 kfree(dev);
2192}
2193
2194/* s390 floating irq controller (flic) */
2195struct kvm_device_ops kvm_flic_ops = {
2196 .name = "kvm-flic",
2197 .get_attr = flic_get_attr,
2198 .set_attr = flic_set_attr,
4f129858 2199 .has_attr = flic_has_attr,
c05c4186
JF
2200 .create = flic_create,
2201 .destroy = flic_destroy,
2202};
84223598
CH
2203
2204static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2205{
2206 unsigned long bit;
2207
2208 bit = bit_nr + (addr % PAGE_SIZE) * 8;
2209
2210 return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2211}
2212
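/*
 * Worked example: with BITS_PER_LONG == 64 and a page-aligned addr,
 * bit_nr 0 maps to 0 ^ 63 = 63 in the swapped case, and bit_nr 7 of
 * the same byte maps to 56, i.e. the XOR converts the architecture's
 * MSB-first bit numbering into Linux's LSB-first indexing; without
 * swap the number is used as a plain bit index.
 */
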
2213static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
2214 u64 addr)
2215{
2216 struct s390_map_info *map;
2217
2218 if (!adapter)
2219 return NULL;
2220
2221 list_for_each_entry(map, &adapter->maps, list) {
2222 if (map->guest_addr == addr)
2223 return map;
2224 }
2225 return NULL;
2226}
2227
2228static int adapter_indicators_set(struct kvm *kvm,
2229 struct s390_io_adapter *adapter,
2230 struct kvm_s390_adapter_int *adapter_int)
2231{
2232 unsigned long bit;
2233 int summary_set, idx;
2234 struct s390_map_info *info;
2235 void *map;
2236
2237 info = get_map_info(adapter, adapter_int->ind_addr);
2238 if (!info)
2239 return -1;
2240 map = page_address(info->page);
2241 bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
2242 set_bit(bit, map);
2243 idx = srcu_read_lock(&kvm->srcu);
2244 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2245 set_page_dirty_lock(info->page);
2246 info = get_map_info(adapter, adapter_int->summary_addr);
2247 if (!info) {
2248 srcu_read_unlock(&kvm->srcu, idx);
2249 return -1;
2250 }
2251 map = page_address(info->page);
2252 bit = get_ind_bit(info->addr, adapter_int->summary_offset,
2253 adapter->swap);
2254 summary_set = test_and_set_bit(bit, map);
2255 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2256 set_page_dirty_lock(info->page);
2257 srcu_read_unlock(&kvm->srcu, idx);
2258 return summary_set ? 0 : 1;
2259}
2260
2261/*
2262 * < 0 - not injected due to error
2263 * = 0 - coalesced, summary indicator already active
2264 * > 0 - injected interrupt
2265 */
2266static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2267 struct kvm *kvm, int irq_source_id, int level,
2268 bool line_status)
2269{
2270 int ret;
2271 struct s390_io_adapter *adapter;
2272
2273 /* We're only interested in the 0->1 transition. */
2274 if (!level)
2275 return 0;
2276 adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2277 if (!adapter)
2278 return -1;
2279 down_read(&adapter->maps_lock);
2280 ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2281 up_read(&adapter->maps_lock);
2282 if ((ret > 0) && !adapter->masked) {
2283 struct kvm_s390_interrupt s390int = {
2284 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2285 .parm = 0,
2286 .parm64 = (adapter->isc << 27) | 0x80000000,
2287 };
2288 ret = kvm_s390_inject_vm(kvm, &s390int);
2289 if (ret == 0)
2290 ret = 1;
2291 }
2292 return ret;
2293}
2294
c63cf538
RK
2295int kvm_set_routing_entry(struct kvm *kvm,
2296 struct kvm_kernel_irq_routing_entry *e,
84223598
CH
2297 const struct kvm_irq_routing_entry *ue)
2298{
2299 int ret;
2300
2301 switch (ue->type) {
2302 case KVM_IRQ_ROUTING_S390_ADAPTER:
2303 e->set = set_adapter_int;
2304 e->adapter.summary_addr = ue->u.adapter.summary_addr;
2305 e->adapter.ind_addr = ue->u.adapter.ind_addr;
2306 e->adapter.summary_offset = ue->u.adapter.summary_offset;
2307 e->adapter.ind_offset = ue->u.adapter.ind_offset;
2308 e->adapter.adapter_id = ue->u.adapter.adapter_id;
2309 ret = 0;
2310 break;
2311 default:
2312 ret = -EINVAL;
2313 }
2314
2315 return ret;
2316}
2317
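/*
 * Installing one such route from userspace, as a sketch (vm_fd,
 * ind_gpa and summary_gpa are assumptions; the gsi would then
 * typically be wired to an irqfd via KVM_IRQFD):
 *
 *	struct {
 *		struct kvm_irq_routing hdr;
 *		struct kvm_irq_routing_entry entry;
 *	} routes = {
 *		.hdr.nr = 1,
 *		.entry = {
 *			.gsi  = 0,
 *			.type = KVM_IRQ_ROUTING_S390_ADAPTER,
 *			.u.adapter = {
 *				.summary_addr	= summary_gpa,
 *				.ind_addr	= ind_gpa,
 *				.summary_offset = 7,
 *				.ind_offset	= 0,
 *				.adapter_id	= 0,
 *			},
 *		},
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_GSI_ROUTING, &routes) < 0)
 *		perror("KVM_SET_GSI_ROUTING");
 */
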
2318int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2319 int irq_source_id, int level, bool line_status)
2320{
2321 return -EINVAL;
2322}
816c7667
JF
2323
2324int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2325{
2326 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2327 struct kvm_s390_irq *buf;
2328 int r = 0;
2329 int n;
2330
2331 buf = vmalloc(len);
2332 if (!buf)
2333 return -ENOMEM;
2334
2335 if (copy_from_user((void *) buf, irqstate, len)) {
2336 r = -EFAULT;
2337 goto out_free;
2338 }
2339
2340 /*
2341 * Don't allow setting the interrupt state
2342 * when there are already interrupts pending
2343 */
2344 spin_lock(&li->lock);
2345 if (li->pending_irqs) {
2346 r = -EBUSY;
2347 goto out_unlock;
2348 }
2349
2350 for (n = 0; n < len / sizeof(*buf); n++) {
2351 r = do_inject_vcpu(vcpu, &buf[n]);
2352 if (r)
2353 break;
2354 }
2355
2356out_unlock:
2357 spin_unlock(&li->lock);
2358out_free:
2359 vfree(buf);
2360
2361 return r;
2362}
2363
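/*
 * This is reached from the KVM_S390_SET_IRQ_STATE vcpu ioctl; the
 * inverse, KVM_S390_GET_IRQ_STATE, fills such a buffer via
 * kvm_s390_get_irq_state() below. A sketch with assumed values:
 *
 *	struct kvm_s390_irq irqs[1] = {
 *		{ .type = KVM_S390_INT_CPU_TIMER },
 *	};
 *	struct kvm_s390_irq_state irq_state = {
 *		.buf = (__u64)(unsigned long)irqs,
 *		.len = sizeof(irqs),
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &irq_state) < 0)
 *		perror("KVM_S390_SET_IRQ_STATE");
 */
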
2364static void store_local_irq(struct kvm_s390_local_interrupt *li,
2365 struct kvm_s390_irq *irq,
2366 unsigned long irq_type)
2367{
2368 switch (irq_type) {
2369 case IRQ_PEND_MCHK_EX:
2370 case IRQ_PEND_MCHK_REP:
2371 irq->type = KVM_S390_MCHK;
2372 irq->u.mchk = li->irq.mchk;
2373 break;
2374 case IRQ_PEND_PROG:
2375 irq->type = KVM_S390_PROGRAM_INT;
2376 irq->u.pgm = li->irq.pgm;
2377 break;
2378 case IRQ_PEND_PFAULT_INIT:
2379 irq->type = KVM_S390_INT_PFAULT_INIT;
2380 irq->u.ext = li->irq.ext;
2381 break;
2382 case IRQ_PEND_EXT_EXTERNAL:
2383 irq->type = KVM_S390_INT_EXTERNAL_CALL;
2384 irq->u.extcall = li->irq.extcall;
2385 break;
2386 case IRQ_PEND_EXT_CLOCK_COMP:
2387 irq->type = KVM_S390_INT_CLOCK_COMP;
2388 break;
2389 case IRQ_PEND_EXT_CPU_TIMER:
2390 irq->type = KVM_S390_INT_CPU_TIMER;
2391 break;
2392 case IRQ_PEND_SIGP_STOP:
2393 irq->type = KVM_S390_SIGP_STOP;
2394 irq->u.stop = li->irq.stop;
2395 break;
2396 case IRQ_PEND_RESTART:
2397 irq->type = KVM_S390_RESTART;
2398 break;
2399 case IRQ_PEND_SET_PREFIX:
2400 irq->type = KVM_S390_SIGP_SET_PREFIX;
2401 irq->u.prefix = li->irq.prefix;
2402 break;
2403 }
2404}
2405
2406int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
2407{
a5bd7647 2408 int scn;
816c7667
JF
2409 unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
2410 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2411 unsigned long pending_irqs;
2412 struct kvm_s390_irq irq;
2413 unsigned long irq_type;
2414 int cpuaddr;
2415 int n = 0;
2416
2417 spin_lock(&li->lock);
2418 pending_irqs = li->pending_irqs;
2419 memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
2420 sizeof(sigp_emerg_pending));
2421 spin_unlock(&li->lock);
2422
2423 for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
2424 memset(&irq, 0, sizeof(irq));
2425 if (irq_type == IRQ_PEND_EXT_EMERGENCY)
2426 continue;
2427 if (n + sizeof(irq) > len)
2428 return -ENOBUFS;
2429 store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
2430 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2431 return -EFAULT;
2432 n += sizeof(irq);
2433 }
2434
2435 if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
2436 for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
2437 memset(&irq, 0, sizeof(irq));
2438 if (n + sizeof(irq) > len)
2439 return -ENOBUFS;
2440 irq.type = KVM_S390_INT_EMERGENCY;
2441 irq.u.emerg.code = cpuaddr;
2442 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2443 return -EFAULT;
2444 n += sizeof(irq);
2445 }
2446 }
2447
a5bd7647 2448 if (sca_ext_call_pending(vcpu, &scn)) {
816c7667
JF
2449 if (n + sizeof(irq) > len)
2450 return -ENOBUFS;
2451 memset(&irq, 0, sizeof(irq));
2452 irq.type = KVM_S390_INT_EXTERNAL_CALL;
a5bd7647 2453 irq.u.extcall.code = scn;
816c7667
JF
2454 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2455 return -EFAULT;
2456 n += sizeof(irq);
2457 }
2458
2459 return n;
2460}