/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>

#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

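/* per-vcpu event counters, exposed to userspace via debugfs */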
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	switch (ext) {
	default:
		return 0;
	}
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -EINVAL;
	}

	return r;
}

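/*
 * Create the architecture-specific VM state: enable the SIE capability
 * for the current process, allocate the system control area (SCA) that
 * will hold the vcpus' SIE block pointers, and register a debug feature
 * area for this VM.
 */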
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_destroy(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

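/*
 * On vcpu load, save the host's floating point and access registers and
 * make the guest's live; on vcpu put, do the reverse.
 */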
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in POP, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

/* The current code can have up to 256 pages for virtio */
#define VIRTIODESCSPACE (256ul * 4096ul)

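/*
 * Prepare the SIE control block for this vcpu: gmsor holds the guest
 * memory origin and gmslm the limit, sized to cover guest memory plus
 * the virtio descriptor space above it.
 */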
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
				      vcpu->kvm->arch.guest_origin +
				      VIRTIODESCSPACE - 1ul;
	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
		    (unsigned long) vcpu);
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xfe;
	return 0;
}

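/*
 * Allocate a vcpu with its SIE control block, enter the block into the
 * VM's SCA slot for this cpu id, and link the vcpu's local interrupt
 * state with the VM-wide floating interrupt structure.
 */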
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	BUG_ON(kvm->arch.sca->cpu[id].sda);
	kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock_bh(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock_bh(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_cpu;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

extern void s390_handle_mcck(void);

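/*
 * Enter SIE for one run of the guest: handle pending machine checks and
 * deliver pending interrupts first, mirror gprs 14/15 into the SIE block,
 * then execute sie64a; a fault inside SIE is reflected to the guest as
 * an addressing exception.
 */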
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

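/*
 * The main run loop: re-enter SIE until an intercept needs userspace
 * attention or a signal is pending, then export the SIE intercept data
 * to the kvm_run structure for userspace to handle.
 */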
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_sleep();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (signal_pending(current) && !rc)
		rc = -EINTR;

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}

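/*
 * Copy data into guest memory, either through the prefixed view of the
 * vcpu (copy_to_guest) or to absolute guest addresses.
 */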
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

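/*
 * Dispatch the s390 specific vcpu ioctls: interrupt injection, store
 * status, setting the initial PSW, and the initial cpu reset.
 */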
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which
	   has to start at guest physical zero, has to be located at a
	   page boundary in userland, and has to end at a page boundary.
	   It is okay for the memory in userland to be fragmented into
	   various different vmas. It is okay to mmap() and munmap() stuff
	   in this slot after doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	kvm->arch.guest_origin = mem->userspace_addr;
	kvm->arch.guest_memsize = mem->memory_size;

	/* FIXME: we do want to interrupt running CPUs and update their memory
	   configuration now to avoid race conditions. But hey, changing the
	   memory layout while virtual CPUs are running is usually bad
	   programming practice. */

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);