KVM: s390: fix return value of kvm_arch_init_vm
arch/s390/kvm/kvm-s390.c
/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

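/*
 * Exit and instruction counters, exported by the generic KVM code via
 * debugfs (typically under /sys/kernel/debug/kvm/). VCPU_STAT() records
 * the offset of each counter inside struct kvm_vcpu, so the generic code
 * can read the counters without knowing the s390-specific layout.
 */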
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}
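
/*
 * Illustrative userspace probe (a sketch, not part of this file): with an
 * open /dev/kvm file descriptor "kvmfd", a VMM would check for these
 * capabilities before relying on them:
 *
 *      int has_gmap = ioctl(kvmfd, KVM_CHECK_EXTENSION, KVM_CAP_S390_GMAP);
 */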

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

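/*
 * VM creation uses the usual goto-based unwind: after "rc = -ENOMEM" is
 * set, each failing allocation jumps to the label that frees exactly what
 * was already set up, so the caller always sees a meaningful error code
 * (the return value fix named in the commit subject).
 */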
int kvm_arch_init_vm(struct kvm *kvm)
{
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        kvm->arch.gmap = gmap_alloc(current->mm);
        if (!kvm->arch.gmap)
                goto out_nogmap;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
            (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

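/*
 * Teardown mirrors kvm_arch_init_vm(): the vcpus go first, then the SCA
 * page, the debug feature and finally the guest address space.
 */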
void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

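/*
 * Called when the vcpu task is scheduled in: stash the host's floating
 * point and access registers, install the guest's copies and make the
 * guest address space (gmap) the active one for SIE; kvm_arch_vcpu_put()
 * below does the reverse on schedule-out.
 */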
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
        gmap_enable(vcpu->arch.gmap);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

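/*
 * Reset a vcpu to the architected initial CPU reset state; the specific
 * gcr[0]/gcr[14] defaults below are assumed to be the values the
 * Principles of Operation prescribes for that state.
 */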
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix = 0UL;
        vcpu->arch.sie_block->ihcpu = 0xffff;
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

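/*
 * One-time setup of the SIE control block. The ecb/eca values enable
 * specific interpretation facilities (the exact bit meanings are
 * machine-defined), and the CPU id version byte of 0xff presumably
 * identifies the CPU to the guest as virtualized.
 */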
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
        vcpu->arch.sie_block->ecb = 6;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        vcpu->arch.sie_block->fac = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
        set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

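/*
 * One round trip through SIE: registers 14 and 15 are copied into the
 * control block, a due reschedule is honoured, pending host machine
 * checks are handled and queued guest interrupts delivered, then the
 * guest runs until the hardware intercepts. kvm_guest_enter()/exit()
 * run with interrupts disabled, presumably so the guest/host CPU time
 * accounting cannot race with an interrupt.
 */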
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

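/*
 * Write n bytes to guest memory: with 'prefix' set, guestdest is a guest
 * real address subject to prefixing (low-core relocation); otherwise it
 * is treated as a guest absolute address.
 */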
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        default:
                r = -EINVAL;
        }
        return r;
}
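
/*
 * Illustrative use from userspace (a sketch, not part of this file): with
 * a vcpu file descriptor "vcpufd", a program interrupt could be queued as
 * follows; parm carries the program interruption code (0x05 is an
 * addressing exception):
 *
 *      struct kvm_s390_interrupt s390int = {
 *              .type = KVM_S390_PROGRAM_INT,
 *              .parm = 5,
 *      };
 *      ioctl(vcpufd, KVM_S390_INTERRUPT, &s390int);
 */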

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks: we allow exactly one memory slot, which has
           to start at guest physical zero and whose userland backing has to
           begin and end on a megabyte boundary (see the 0xfffff masks
           below). The userland memory may be fragmented into different
           vmas, and it is fine to mmap() and munmap() ranges within this
           slot at any time after this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                              mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);