diff options
Diffstat (limited to 'arch')
-rw-r--r-- | arch/powerpc/kvm/44x_tlb.c | 8 |
1 files changed, 1 insertions, 7 deletions
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index 3594bbd1f618..7b11fd7be542 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c | |||
@@ -110,7 +110,6 @@ static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe) | |||
110 | return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW); | 110 | return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW); |
111 | } | 111 | } |
112 | 112 | ||
113 | /* Must be called with mmap_sem locked for writing. */ | ||
114 | static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu, | 113 | static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu, |
115 | unsigned int index) | 114 | unsigned int index) |
116 | { | 115 | { |
@@ -150,17 +149,16 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid, | |||
150 | /* Get reference to new page. */ | 149 | /* Get reference to new page. */ |
151 | down_read(&current->mm->mmap_sem); | 150 | down_read(&current->mm->mmap_sem); |
152 | new_page = gfn_to_page(vcpu->kvm, gfn); | 151 | new_page = gfn_to_page(vcpu->kvm, gfn); |
152 | up_read(&current->mm->mmap_sem); | ||
153 | if (is_error_page(new_page)) { | 153 | if (is_error_page(new_page)) { |
154 | printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); | 154 | printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); |
155 | kvm_release_page_clean(new_page); | 155 | kvm_release_page_clean(new_page); |
156 | up_read(&current->mm->mmap_sem); | ||
157 | return; | 156 | return; |
158 | } | 157 | } |
159 | hpaddr = page_to_phys(new_page); | 158 | hpaddr = page_to_phys(new_page); |
160 | 159 | ||
161 | /* Drop reference to old page. */ | 160 | /* Drop reference to old page. */ |
162 | kvmppc_44x_shadow_release(vcpu, victim); | 161 | kvmppc_44x_shadow_release(vcpu, victim); |
163 | up_read(&current->mm->mmap_sem); | ||
164 | 162 | ||
165 | vcpu->arch.shadow_pages[victim] = new_page; | 163 | vcpu->arch.shadow_pages[victim] = new_page; |
166 | 164 | ||
@@ -194,7 +192,6 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
194 | int i; | 192 | int i; |
195 | 193 | ||
196 | /* XXX Replace loop with fancy data structures. */ | 194 | /* XXX Replace loop with fancy data structures. */ |
197 | down_write(&current->mm->mmap_sem); | ||
198 | for (i = 0; i <= tlb_44x_hwater; i++) { | 195 | for (i = 0; i <= tlb_44x_hwater; i++) { |
199 | struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; | 196 | struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; |
200 | unsigned int tid; | 197 | unsigned int tid; |
@@ -219,7 +216,6 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
219 | stlbe->tid, stlbe->word0, stlbe->word1, | 216 | stlbe->tid, stlbe->word0, stlbe->word1, |
220 | stlbe->word2, handler); | 217 | stlbe->word2, handler); |
221 | } | 218 | } |
222 | up_write(&current->mm->mmap_sem); | ||
223 | } | 219 | } |
224 | 220 | ||
225 | /* Invalidate all mappings on the privilege switch after PID has been changed. | 221 | /* Invalidate all mappings on the privilege switch after PID has been changed. |
@@ -231,7 +227,6 @@ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) | |||
231 | 227 | ||
232 | if (vcpu->arch.swap_pid) { | 228 | if (vcpu->arch.swap_pid) { |
233 | /* XXX Replace loop with fancy data structures. */ | 229 | /* XXX Replace loop with fancy data structures. */ |
234 | down_write(&current->mm->mmap_sem); | ||
235 | for (i = 0; i <= tlb_44x_hwater; i++) { | 230 | for (i = 0; i <= tlb_44x_hwater; i++) { |
236 | struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; | 231 | struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i]; |
237 | 232 | ||
@@ -243,7 +238,6 @@ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode) | |||
243 | stlbe->tid, stlbe->word0, stlbe->word1, | 238 | stlbe->tid, stlbe->word0, stlbe->word1, |
244 | stlbe->word2, handler); | 239 | stlbe->word2, handler); |
245 | } | 240 | } |
246 | up_write(&current->mm->mmap_sem); | ||
247 | vcpu->arch.swap_pid = 0; | 241 | vcpu->arch.swap_pid = 0; |
248 | } | 242 | } |
249 | 243 | ||