KVM: PPC: Book3s: PR: Add (dumb) MMU Notifier support
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index cae2defd14623d3fd0094597583cb71e774e9f82..10f8217b8c38ca9ee55d4d0ff448282c81eb6fbe 100644
@@ -90,8 +90,55 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 
 void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 {
+       /*
+        * We misuse TLB_FLUSH to indicate that we want to clear
+        * all shadow cache entries.
+        */
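+       /*
+        * An ea/mask pair of (0, 0) is the flush-everything case in
+        * kvmppc_mmu_pte_flush(): it drops every cached shadow PTE instead
+        * of matching one range. The request itself is raised by the MMU
+        * notifier hooks below via kvm_flush_remote_tlbs().
+        */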
+       if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+               kvmppc_mmu_pte_flush(vcpu, 0, 0);
 }
 
+/************* MMU Notifiers *************/
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+       trace_kvm_unmap_hva(hva);
+
+       /*
+        * Flush all shadow tlb entries everywhere. This is slow, but
+        * we are 100% sure that we catch the page that is to be unmapped.
+        */
+       kvm_flush_remote_tlbs(kvm);
+
+       return 0;
+}
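+
+/*
+ * Sketch, for context: in generic KVM code, kvm_flush_remote_tlbs() raises
+ * KVM_REQ_TLB_FLUSH on every vcpu and kicks the vcpus out of guest mode,
+ * so each one ends up in kvmppc_core_check_requests() above and clears its
+ * shadow PTE cache before re-entering the guest.
+ */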
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+       /* kvm_unmap_hva() flushes everything anyway */
+       kvm_unmap_hva(kvm, start);
+
+       return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       /* XXX could be more clever ;) */
+       return 0;
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+       /* XXX could be more clever ;) */
+       return 0;
+}
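+
+/*
+ * Note on the two stubs above: always returning 0 ("not referenced") means
+ * guest accesses are invisible to host page aging, so guest-touched pages
+ * look idle to the host LRU. A smarter implementation would track
+ * referenced bits in the shadow PTE cache.
+ */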
+
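+/*
+ * This backs the change_pte MMU notifier (fired e.g. when KSM merges a
+ * page or COW replaces one): dropping the shadow mapping forces a fresh
+ * guest fault, which then picks up the new host translation.
+ */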
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+       /* The page will get remapped properly on its next fault */
+       kvm_unmap_hva(kvm, hva);
+}
+
+/*****************************************/
+
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
        ulong smsr = vcpu->arch.shared->msr;