KVM: s390: optimize float int lock: spin_lock_bh --> spin_lock
arch/s390/kvm/kvm-s390.c
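
This commit switches kvm->arch.float_int.lock from the softirq-safe lock
primitives to the plain ones, i.e. call sites that previously did

        spin_lock_bh(&kvm->arch.float_int.lock);

now do

        spin_lock(&kvm->arch.float_int.lock);

(and likewise for unlock), presumably because the floating interrupt lock is
only taken in process context, so disabling bottom halves around the critical
section bought nothing.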
/*
 * s390host.c --  hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};
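
/*
 * Note: kvm common code walks debugfs_entries[] and creates one file per
 * counter above, presumably under /sys/kernel/debug/kvm/; VCPU_STAT()
 * supplies the offset of the counter inside struct kvm_vcpu so the generic
 * code can read it without knowing the s390 layout.
 */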

/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}
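
/*
 * Note: s390_enable_sie() converts the calling process' address space into
 * one whose page tables the SIE instruction can use (a sketch of the
 * contract, not a definitive description).  It is also called from
 * kvm_arch_create_vm() below, so the ioctl is merely an early opt-in.
 */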

int kvm_dev_ioctl_check_extension(long ext)
{
        switch (ext) {
        default:
                return 0;
        }
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -EINVAL;
        }

        return r;
}
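
/*
 * Illustrative userspace sketch (an assumption about the callers, not part
 * of this file): injecting a floating (VM-wide) interrupt through the fd
 * returned by KVM_CREATE_VM.  The struct layout is the one the
 * copy_from_user() above expects, from <linux/kvm.h>.
 *
 *      struct kvm_s390_interrupt s390int = {
 *              .type = KVM_S390_INT_VIRTIO,
 *              .parm = 0,
 *              .parm64 = 0,
 *      };
 *      if (ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int) < 0)
 *              perror("KVM_S390_INTERRUPT");
 */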

struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_nokvm;

        rc = -ENOMEM;
        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                goto out_nokvm;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_nosca;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        return kvm;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_nosca:
        kfree(kvm);
out_nokvm:
        return ERR_PTR(rc);
}
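
/*
 * Note: VM_EVENT()/VCPU_EVENT() log through the per-VM s390 debug feature
 * registered above as "kvm-<pid>"; the records are typically readable via
 * the sprintf view under /sys/kernel/debug/s390dbf/ (path is an assumption
 * based on the usual s390dbf mount point).
 */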

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_destroy(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}
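
/*
 * kvm_arch_vcpu_load()/kvm_arch_vcpu_put() swap the floating point and
 * access register context between host and guest, so the guest values are
 * live in the hardware registers for the whole time the vcpu is loaded.
 */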
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix = 0UL;
        vcpu->arch.sie_block->ihcpu = 0xffff;
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}
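
/*
 * Note: the lfpc in the reset above loads the just-zeroed guest FPC into
 * the hardware floating point control register, presumably so the reset
 * takes effect immediately for a currently loaded vcpu instead of waiting
 * for the next kvm_arch_vcpu_load().
 */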

/* The current code can have up to 256 pages for virtio */
#define VIRTIODESCSPACE (256ul * 4096ul)

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
        vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
                                      vcpu->kvm->arch.guest_origin +
                                      VIRTIODESCSPACE - 1ul;
        vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
        vcpu->arch.sie_block->ecb = 2;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}
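
/*
 * gmsor/gmslm give SIE the host-virtual window backing guest memory:
 * origin and last byte.  The limit includes VIRTIODESCSPACE so the virtio
 * descriptor pages sitting directly above guest memory stay addressable
 * from the guest (an inference from the define and the memslot code below,
 * not an authoritative statement).
 */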

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        int rc = -ENOMEM;

        if (!vcpu)
                goto out_nomem;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        BUG_ON(kvm->arch.sca->cpu[id].sda);
        kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_cpu;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_cpu:
        kfree(vcpu);
out_nomem:
        return ERR_PTR(rc);
}
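
/*
 * Registration of the vcpu's local interrupt state in the VM-wide floating
 * interrupt structure above is serialized by float_int.lock.  This commit
 * takes it with plain spin_lock() instead of spin_lock_bh(): per the
 * subject line, the lock is not taken from bottom-half context, so
 * disabling softirqs around the critical section is unnecessary overhead.
 */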

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_s390_vcpu_initial_reset(vcpu);
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        vcpu_put(vcpu);
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        vcpu_load(vcpu);
        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else
                vcpu->arch.sie_block->gpsw = psw;
        vcpu_put(vcpu);
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
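
/*
 * Note on __vcpu_run(): only r14/r15 live in the sie_block (gg14), so the
 * two memcpys shuttle them in and out around the SIE entry.  A nonzero
 * return from sie64a() means the host faulted while executing SIE, which
 * is reflected into the guest as an addressing exception.  The irqs-off
 * bracket around kvm_guest_enter()/kvm_guest_exit() is (presumably) what
 * the guest-time accounting code expects.
 */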

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
                vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
                vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
                break;
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        might_sleep();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (signal_pending(current) && !rc)
                rc = -EINTR;

        if (rc == -ENOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
                kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        vcpu->stat.exit_userspace++;
        return rc;
}
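
/*
 * Illustrative userspace sketch (an assumption about the caller, not part
 * of this file): the run loop that feeds the exit handling above.
 * mmap_size would come from ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0).
 *
 *      struct kvm_run *run = mmap(NULL, mmap_size,
 *                                 PROT_READ | PROT_WRITE, MAP_SHARED,
 *                                 vcpu_fd, 0);
 *      while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *              if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *                      break;  // emulate using run->s390_sieic.ipa/ipb
 *      }
 */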

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}
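
/*
 * Each store above lands at the architected offset of the field inside
 * struct save_area_s390x; the prefix flag chooses whether the copy goes
 * through the prefixed (logical) mapping or guest absolute addressing,
 * matching the two special cases documented above.
 */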

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        int rc;

        vcpu_load(vcpu);
        rc = __kvm_s390_vcpu_store_status(vcpu, addr);
        vcpu_put(vcpu);
        return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        return -EFAULT;
                return kvm_s390_inject_vcpu(vcpu, &s390int);
        }
        case KVM_S390_STORE_STATUS:
                return kvm_s390_vcpu_store_status(vcpu, arg);
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                if (copy_from_user(&psw, argp, sizeof(psw)))
                        return -EFAULT;
                return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
        }
        case KVM_S390_INITIAL_RESET:
                return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
        default:
                ;
        }
        return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
        int i;

        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
           The memory in userland is ok to be fragmented into various different
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time */

        if (mem->slot || kvm->arch.guest_memsize)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        /* lock all vcpus */
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (!kvm->vcpus[i])
                        continue;
                if (!mutex_trylock(&kvm->vcpus[i]->mutex))
                        goto fail_out;
        }

        kvm->arch.guest_origin = mem->userspace_addr;
        kvm->arch.guest_memsize = mem->memory_size;

        /* update sie control blocks, and unlock all vcpus */
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm->vcpus[i]->arch.sie_block->gmsor =
                                kvm->arch.guest_origin;
                        kvm->vcpus[i]->arch.sie_block->gmslm =
                                kvm->arch.guest_memsize +
                                kvm->arch.guest_origin +
                                VIRTIODESCSPACE - 1ul;
                        mutex_unlock(&kvm->vcpus[i]->mutex);
                }
        }

        return 0;

fail_out:
        /* unlock only the vcpus we actually locked: slot i itself failed
         * trylock, so start at i - 1 and skip slots without a vcpu */
        for (--i; i >= 0; i--)
                if (kvm->vcpus[i])
                        mutex_unlock(&kvm->vcpus[i]->mutex);
        return -EINVAL;
}
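
/*
 * Illustrative userspace sketch (an assumption about the caller, not part
 * of this file): registering the single slot the checks above require,
 * with ram page aligned and ram_size a multiple of PAGE_SIZE.
 *
 *      struct kvm_userspace_memory_region mem = {
 *              .slot = 0,
 *              .flags = 0,
 *              .guest_phys_addr = 0,
 *              .memory_size = ram_size,
 *              .userspace_addr = (unsigned long) ram,
 *      };
 *      if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem) < 0)
 *              perror("KVM_SET_USER_MEMORY_REGION");
 */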

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

static int __init kvm_s390_init(void)
{
        return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);