KVM: s390: handle SIGP sense running intercepts
arch/s390/kvm/kvm-s390.c
/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};
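
/*
 * The VCPU_STAT counters above are picked up by the generic KVM code and
 * are typically exported read-only through debugfs (e.g. under
 * /sys/kernel/debug/kvm/), so intercept and interrupt-delivery rates can
 * be inspected at runtime.
 */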

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
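
/*
 * Illustrative userspace sketch for the ioctl above (assumption: "kvm_fd"
 * is an open /dev/kvm file descriptor; error handling trimmed). The ioctl
 * is typically issued early, before guest memory is set up:
 *
 *	if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *		perror("KVM_S390_ENABLE_SIE");
 */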

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}
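
/*
 * Illustrative sketch (assumption: "kvm_fd" is an open /dev/kvm file
 * descriptor): userspace can probe the capabilities advertised above with
 * KVM_CHECK_EXTENSION before relying on them, e.g.:
 *
 *	int has_psw = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_PSW);
 */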

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
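
/*
 * Illustrative userspace sketch for the VM-level KVM_S390_INTERRUPT ioctl
 * handled above (assumptions: "vm_fd" is a KVM_CREATE_VM descriptor and
 * "token" is a device-specific cookie; error handling trimmed). This queues
 * a floating virtio interrupt for the guest:
 *
 *	struct kvm_s390_interrupt irq = {
 *		.type   = KVM_S390_INT_VIRTIO,
 *		.parm   = 0,
 *		.parm64 = token,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
 */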

int kvm_arch_init_vm(struct kvm *kvm)
{
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	kvm->arch.gmap = gmap_alloc(current->mm);
	if (!kvm->arch.gmap)
		goto out_nogmap;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
	    (__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->arch.guest_acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
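
/*
 * Illustrative userspace sketch for KVM_S390_SET_INITIAL_PSW (assumptions:
 * "vcpu_fd" is a KVM_CREATE_VCPU descriptor, the entry address 0x10000 is
 * made up, and the mask bits merely select 64-bit addressing; error
 * handling trimmed). As the code above shows, this only succeeds while the
 * vcpu is stopped:
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000UL,
 *		.addr = 0x10000,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */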

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

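/*
 * Run the vcpu once: sync guest gprs 14/15 into the SIE control block,
 * deliver any pending interrupts and enter SIE via sie64a(). If sie64a()
 * reports a fault, an addressing exception is injected into the guest.
 * Note that SIE runs with interrupts enabled; the kvm_guest_enter()/
 * kvm_guest_exit() pair only brackets the accounting.
 */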
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
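
/*
 * Illustrative userspace run loop for the ioctl above (assumptions:
 * "vcpu_fd" is a KVM_CREATE_VCPU descriptor, "run" points to the mmap()ed
 * struct kvm_run of that vcpu, and handle_sieic() is a made-up helper that
 * decodes run->s390_sieic.icptcode/ipa/ipb; error handling trimmed):
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run);
 *	}
 */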

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 * (an illustrative userspace sketch follows the function below)
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
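
/*
 * Illustrative userspace sketch (assumption: "vcpu_fd" is a KVM_CREATE_VCPU
 * descriptor; error handling trimmed). Storing status at the architected
 * save area without passing an explicit address:
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 */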

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks: there can be exactly one memory slot; it must
	   start at guest physical address zero, its userland address and size
	   must be 1 MB aligned, and it must be user-allocated. The userland
	   memory may be fragmented into several different vmas, and it is
	   fine to mmap() and munmap() ranges within this slot at any time
	   after this call. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
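
/*
 * Illustrative userspace sketch that satisfies the checks above
 * (assumptions: "vm_fd" is a KVM_CREATE_VM descriptor, "backing" comes from
 * an anonymous mmap() and GUEST_SIZE is a 1 MB multiple; error handling
 * trimmed):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = GUEST_SIZE,
 *		.userspace_addr  = (unsigned long) backing,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */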

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;


	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);