/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.shared->msr & MSR_WE) ||
	       !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

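/*
 * Handle a paravirtual hypercall from the guest: the hypercall number
 * arrives in r11 and up to four arguments in r3-r6. The second return
 * value is placed in the guest's r4 here; the primary status code is
 * this function's return value, which the caller copies into the
 * guest's r3.
 */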
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = HC_EV_SUCCESS;
		break;
	}
	case HC_VENDOR_KVM | KVM_HC_FEATURES:
		r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	default:
		r = HC_EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}

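/*
 * Emulate one guest instruction and map the emulation result onto a
 * resume action: keep running the guest, exit to userspace for MMIO,
 * or bail out to the host on emulation failure.
 */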
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
	case KVM_CAP_SYNC_MMU:
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}

	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;

	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
	vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
	vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

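/*
 * Complete an MMIO load once userspace has filled in run->mmio.data:
 * pick the bytes up with the endianness of the original guest access,
 * sign-extend if the instruction asked for it, and route the value to
 * the GPR, FPR or (on Book3S) QPR recorded in vcpu->arch.io_gpr.
 */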
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

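/*
 * Set up an MMIO load exit: describe the access in run->mmio, remember
 * which register the data belongs in, and return EMULATE_DO_MMIO so
 * the exit handler drops to userspace. kvmppc_complete_mmio_load()
 * finishes the load on the next KVM_RUN.
 */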
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

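/*
 * Main entry point for KVM_RUN. Before re-entering the guest, finish
 * whatever the previous exit left pending: an MMIO or DCR load whose
 * data userspace has now supplied, or OSI/PAPR hypercall results that
 * must be copied back into the guest's GPRs.
 */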
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu, irq);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

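/*
 * Enable an optional per-vcpu capability (OSI, PAPR mode, or the
 * software-managed TLB on e500). On success the vcpu is re-validated
 * with kvmppc_sanity_check(), since not every combination of features
 * is supported on every cpu type.
 */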
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

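/*
 * Report the paravirtual hypercall sequence to userspace. The opcode
 * constants below encode "lis r0" (addis), "ori r0", "sc" and "nop";
 * on powerpc a nop is "ori 0,0,0", hence inst_nop == inst_ori.
 */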
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_nop = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm *kvm = filp->private_data;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_ALLOCATE_RMA: {
		struct kvm *kvm = filp->private_data;
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}

	case KVM_PPC_ALLOCATE_HTAB: {
		struct kvm *kvm = filp->private_data;
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
		if (r)
			break;
		r = -EFAULT;
		if (put_user(htab_order, (u32 __user *)argp))
			break;
		r = 0;
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_smmu_info info;

		memset(&info, 0, sizeof(info));
		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
	}

out:
	return r;
}

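/*
 * LPID (logical partition ID) allocator: a bitmap of in-use IDs.
 * kvmppc_alloc_lpid() atomically claims the first free bit, while the
 * claim/free helpers let platform code reserve or release a specific
 * LPID.
 */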
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}