KVM: PPC: Remove PPC970 from KVM_BOOK3S_64_HV text in Kconfig
arch/powerpc/kvm/powerpc.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

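/*
 * Backend ops for the HV and PR flavours of book3s KVM.  Each pointer is
 * filled in when the corresponding implementation module registers itself
 * and stays NULL otherwise; kvm_arch_init_vm() below consults them to pick
 * a default when userspace does not request a specific VM type.
 */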
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Called with interrupts
 * enabled; this function hard-disables them itself (the WARN_ON below
 * fires if they are already disabled on entry).
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		__kvm_guest_enter();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

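/*
 * Sketch of how a subarch run loop is expected to use the helper above
 * (illustrative only; the real callers live in the booke and book3s
 * implementations):
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;	<- back to host, e.g. KVM_EXIT_INTR on a signal
 *	... enter the guest with interrupts still hard-disabled ...
 */
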
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

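/*
 * Handler for the KVM paravirtual hypercall interface: the guest passes the
 * hypercall number in r11 and up to four parameters in r3-r6 (masked to 32
 * bits when the guest runs in 32-bit mode).  The status code is returned to
 * the caller (which places it in the guest's r3) and a second return value
 * is written to r4 here.
 */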
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

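/*
 * kvmppc_st()/kvmppc_ld() below store to / load from a guest effective
 * address: the address is first translated with kvmppc_xlate() and the
 * access is checked against the resulting PTE's protection bits.
 * Guest-kernel-mode (!MSR_PR) accesses that hit the magic page are
 * redirected to the host copy backing it (vcpu->arch.shared) instead of
 * going through the normal guest-memory path.
 */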
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * If we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

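/*
 * Note: "type" above is the machine-type argument userspace passed to the
 * KVM_CREATE_VM ioctl, so a VMM can force a particular implementation with
 * KVM_VM_PPC_PR or KVM_VM_PPC_HV, or pass 0 to take whatever is loaded.
 */
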
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

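/*
 * Completion side of MMIO emulation: called when userspace has filled in
 * run->mmio.data after a KVM_EXIT_MMIO (see kvm_arch_vcpu_ioctl_run below)
 * or when kvm_io_bus_read() satisfied the access in the kernel, to move the
 * result into the register the guest instruction targeted.
 */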
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

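/*
 * Endianness note for the handlers below: run->mmio.data is interpreted in
 * host byte order, so it must be swabbed when exactly one side needs the
 * other byte order.  With need_byteswap = kvmppc_need_byteswap(vcpu), the
 * condition computed is:
 *
 *	host_swabbed = !(need_byteswap ^ is_default_endian)
 *
 * C has no logical XOR operator, hence the if/else spelling used below.
 */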
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

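/*
 * ONE_REG accessors: reg->id encodes both the register and its size
 * (one_reg_size() extracts the latter).  The subarch gets first shot via
 * kvmppc_get_one_reg()/kvmppc_set_one_reg(); -EINVAL from there falls back
 * to the registers handled in common code below (currently the AltiVec
 * state).  "Get" fills val from vcpu state and copies it out to userspace;
 * "set" copies in from userspace and writes the vcpu state.
 */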
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

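/*
 * For KVM_CAP_PPC_ENABLE_HCALL below: PAPR hcall numbers are multiples of
 * 4 (hence the "hcall & 3" sanity check), so the enabled_hcalls bitmap is
 * indexed by hcall/4; args[1] selects whether the hcall is being enabled
 * or disabled for this VM.
 */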
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

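/*
 * A minimal usage sketch for the LPID allocator above (illustrative; the
 * real callers are the subarch init paths, which size the pool from
 * hardware/firmware limits):
 *
 *	kvmppc_init_lpid(64);		<- once, at module init
 *	lpid = kvmppc_alloc_lpid();	<- per VM; < 0 means pool exhausted
 *	...
 *	kvmppc_free_lpid(lpid);		<- on VM teardown
 *
 * kvmppc_claim_lpid() marks an LPID that is already in use elsewhere
 * (e.g. one reserved for the host) so the allocator never hands it out.
 */
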
int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);