author	Hollis Blanchard <hollisb@us.ibm.com>	2008-11-10 15:57:36 -0500
committer	Avi Kivity <avi@redhat.com>	2008-12-31 09:52:26 -0500
commit	fe4e771d5c37f0949047faf95d16a512b21406bf (patch)
tree	518b4ff17a8e1e62b747ce1912c08b62883d2855
parent	df9b856c454e331bc394c80903fcdea19cae2a33 (diff)
KVM: ppc: fix userspace mapping invalidation on context switch
We used to defer invalidating userspace TLB entries until jumping out of the
kernel. This was causing MMU weirdness most easily triggered by using a pipe in
the guest, e.g. "dmesg | tail". I believe the problem was that after the guest
kernel changed the PID (part of context switch), the old process's mappings
were still present, and so copy_to_user() on the "return to new process" path
ended up using stale mappings.

Testing with large pages (64K) exposed the problem, probably because with 4K
pages, pressure on the TLB faulted all process A's mappings out before the
guest kernel could insert any for process B.

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
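The program below is a small userspace toy model of the race described above; it is not kernel code, and every name, type, and size in it is made up for illustration. It models the patched behaviour: when the guest changes its PID, the shadow TLB entries belonging to guest userspace (TID=0) are dropped immediately, so a subsequent lookup (standing in for copy_to_user()) misses and must be refilled for the new process instead of silently hitting the old process's stale translation.

/* Toy model only -- illustrative names, not the KVM implementation. */
#include <stdio.h>

#define SHADOW_TLB_SIZE 4

struct shadow_entry {
	int valid;
	int tid;               /* 0 = guest userspace, 1 = guest kernel */
	unsigned long gvaddr;  /* guest virtual page */
	unsigned long hpaddr;  /* host page backing it */
};

static struct shadow_entry shadow_tlb[SHADOW_TLB_SIZE];
static unsigned int guest_pid;

/* Models the patched kvmppc_set_pid(): on a guest PID change, drop every
 * TID=0 (userspace) shadow entry right away instead of deferring. */
static void set_pid(unsigned int new_pid)
{
	int i;

	if (guest_pid == new_pid)
		return;
	guest_pid = new_pid;

	for (i = 0; i < SHADOW_TLB_SIZE; i++)
		if (shadow_tlb[i].valid && shadow_tlb[i].tid == 0)
			shadow_tlb[i].valid = 0;
}

/* Stand-in for the translation a copy_to_user() would use. */
static int translate(unsigned long gvaddr, unsigned long *hpaddr)
{
	int i;

	for (i = 0; i < SHADOW_TLB_SIZE; i++)
		if (shadow_tlb[i].valid && shadow_tlb[i].gvaddr == gvaddr) {
			*hpaddr = shadow_tlb[i].hpaddr;
			return 0;
		}
	return -1;
}

int main(void)
{
	unsigned long hpaddr;

	/* Process A's userspace page is present in the shadow TLB. */
	shadow_tlb[0] = (struct shadow_entry){ 1, 0, 0x1000, 0xa000 };
	guest_pid = 1;

	/* Guest kernel context-switches to process B; flush happens now. */
	set_pid(2);

	/* With the old deferred flush this would still hit process A's page. */
	if (translate(0x1000, &hpaddr))
		printf("0x1000: no mapping; fault in process B's page\n");
	else
		printf("0x1000: stale mapping to 0x%lx\n", hpaddr);
	return 0;
}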
-rw-r--r--	arch/powerpc/include/asm/kvm_44x.h	2
-rw-r--r--	arch/powerpc/kvm/44x_emulate.c	9
-rw-r--r--	arch/powerpc/kvm/44x_tlb.c	31
3 files changed, 20 insertions, 22 deletions
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
index dece0935071..72e593914ad 100644
--- a/arch/powerpc/include/asm/kvm_44x.h
+++ b/arch/powerpc/include/asm/kvm_44x.h
@@ -44,4 +44,6 @@ static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
 	return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu);
 }
 
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid);
+
 #endif /* __ASM_44X_H__ */
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 9bc50cebf9e..9ef79c78ede 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -21,6 +21,7 @@
 #include <asm/dcr.h>
 #include <asm/dcr-regs.h>
 #include <asm/disassemble.h>
+#include <asm/kvm_44x.h>
 
 #include "booke.h"
 #include "44x_tlb.h"
@@ -38,14 +39,6 @@
 #define XOP_ICCCI 966
 #define XOP_TLBWE 978
 
-static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
-{
-	if (vcpu->arch.pid != new_pid) {
-		vcpu->arch.pid = new_pid;
-		vcpu->arch.swap_pid = 1;
-	}
-}
-
 static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.pc = vcpu->arch.srr0;
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 6fadbd69602..ee2461860bc 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -268,31 +268,34 @@ static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	}
 }
 
-/* Invalidate all mappings on the privilege switch after PID has been changed.
- * The guest always runs with PID=1, so we must clear the entire TLB when
- * switching address spaces. */
 void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
 {
+	vcpu->arch.shadow_pid = !usermode;
+}
+
+void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
+{
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	int i;
 
-	if (vcpu->arch.swap_pid) {
-		/* XXX Replace loop with fancy data structures. */
-		for (i = 0; i <= tlb_44x_hwater; i++) {
-			struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
+	if (unlikely(vcpu->arch.pid == new_pid))
+		return;
+
+	vcpu->arch.pid = new_pid;
+
+	/* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
+	 * can't access guest kernel mappings (TID=1). When we switch to a new
+	 * guest PID, which will also use host PID=0, we must discard the old guest
+	 * userspace mappings. */
+	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_tlb); i++) {
+		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
 
-			/* Future optimization: clear only userspace mappings. */
-			kvmppc_44x_shadow_release(vcpu, i);
-			stlbe->word0 = 0;
-			kvmppc_tlbe_set_modified(vcpu, i);
-			KVMTRACE_5D(STLB_INVAL, vcpu, i,
-			            stlbe->tid, stlbe->word0, stlbe->word1,
-			            stlbe->word2, handler);
+		if (get_tlb_tid(stlbe) == 0) {
+			kvmppc_44x_shadow_release(vcpu, i);
+			stlbe->word0 = 0;
+			kvmppc_tlbe_set_modified(vcpu, i);
 		}
-		vcpu->arch.swap_pid = 0;
 	}
-
-	vcpu->arch.shadow_pid = !usermode;
 }
 
 static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,