aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2012-08-10 07:23:55 -0400
committerAlexander Graf <agraf@suse.de>2012-10-05 17:38:43 -0400
commit9b0cb3c808fef0d75d6f79ab9684246e6879f9c1 (patch)
treedc42b6ff5c6e1d9adf853934b51d52c143a14e60
parent03d25c5bd5c3125055bd36f4813ddb817def19dd (diff)
KVM: PPC: Book3s: PR: Add (dumb) MMU Notifier support
Now that we have very simple MMU Notifier support for e500 in place, also
add the same simple support to book3s. It gets us one step closer to
actual fast support.

Signed-off-by: Alexander Graf <agraf@suse.de>
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h |  3
-rw-r--r--  arch/powerpc/kvm/Kconfig            |  1
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c |  1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c |  1
-rw-r--r--  arch/powerpc/kvm/book3s_mmu_hpte.c  |  5
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c        | 47
6 files changed, 51 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index cea9d3aab71c..4a5ec8f573c7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -46,8 +46,7 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET	1
 #endif
 
-#if defined(CONFIG_KVM_BOOK3S_64_HV) || defined(CONFIG_KVM_E500V2) || \
-	defined(CONFIG_KVM_E500MC)
+#if !defined(CONFIG_KVM_440)
 #include <linux/mmu_notifier.h>
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 40cad8c8bd0e..71f0cd9edf33 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -36,6 +36,7 @@ config KVM_BOOK3S_64_HANDLER
 config KVM_BOOK3S_PR
 	bool
 	select KVM_MMIO
+	select MMU_NOTIFIER
 
 config KVM_BOOK3S_32
 	tristate "KVM support for PowerPC book3s_32 processors"
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 837f13e7b6bf..9fac0101ffb9 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -254,6 +254,7 @@ next_pteg:
 
 	kvmppc_mmu_hpte_cache_map(vcpu, pte);
 
+	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
 out:
 	return r;
 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 0688b6b39585..6b2c80e49681 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -168,6 +168,7 @@ map_again:
 
 		kvmppc_mmu_hpte_cache_map(vcpu, pte);
 	}
+	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
 
 out:
 	return r;
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c
index 41cb0017e757..2c86b0d63714 100644
--- a/arch/powerpc/kvm/book3s_mmu_hpte.c
+++ b/arch/powerpc/kvm/book3s_mmu_hpte.c
@@ -114,11 +114,6 @@ static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 	hlist_del_init_rcu(&pte->list_vpte);
 	hlist_del_init_rcu(&pte->list_vpte_long);
 
-	if (pte->pte.may_write)
-		kvm_release_pfn_dirty(pte->pfn);
-	else
-		kvm_release_pfn_clean(pte->pfn);
-
 	spin_unlock(&vcpu3s->mmu_lock);
 
 	vcpu3s->hpte_cache_count--;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index cae2defd1462..10f8217b8c38 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -90,8 +90,55 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 
 void kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 {
+	/* We misuse TLB_FLUSH to indicate that we want to clear
+	   all shadow cache entries */
+	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
+		kvmppc_mmu_pte_flush(vcpu, 0, 0);
 }
 
+/************* MMU Notifiers *************/
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+	trace_kvm_unmap_hva(hva);
+
+	/*
+	 * Flush all shadow tlb entries everywhere. This is slow, but
+	 * we are 100% sure that we catch the to be unmapped page
+	 */
+	kvm_flush_remote_tlbs(kvm);
+
+	return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+	/* kvm_unmap_hva flushes everything anyways */
+	kvm_unmap_hva(kvm, start);
+
+	return 0;
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	/* XXX could be more clever ;) */
+	return 0;
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	/* XXX could be more clever ;) */
+	return 0;
+}
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	/* The page will get remapped properly on its next fault */
+	kvm_unmap_hva(kvm, hva);
+}
+
+/*****************************************/
+
 static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
 {
 	ulong smsr = vcpu->arch.shared->msr;