author	Hollis Blanchard <hollisb@us.ibm.com>	2008-07-25 14:54:53 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-10-15 04:15:16 -0400
commit	49dd2c492895828a90ecdf889e7fe9cfb40a82a7 (patch)
tree	63b5f85b0ed5a94c8265d86dda4db262b00adf95 /arch/powerpc
parent	83aae4a8098eb8a40a2e9dab3714354182143b4f (diff)
KVM: powerpc: Map guest userspace with TID=0 mappings
When we use TID=N userspace mappings, we must ensure that kernel mappings have
been destroyed when entering userspace. Using TID=1/TID=0 for kernel/user
mappings and running userspace with PID=0 means that userspace can't access
the kernel mappings, but the kernel can directly access userspace.

The net is that we don't need to flush the TLB on privilege switches, but we
do on guest context switches (which are far less frequent). Guest boot time
performance improvement: about 30%.

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
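[Editor's illustration, not part of the patch.] The scheme above relies on the 440 TLB matching rule: an entry translates an access when its TID is 0 or equals the current PID. With guest-kernel translations installed at TID=1, guest-user translations at TID=0, and the shadow PID set to 1 in privileged mode and 0 in user mode, guest userspace cannot reach kernel translations while the guest kernel still sees userspace, so no flush is needed on privilege switches. The following stand-alone C sketch models that rule; the helper name tlb_entry_matches is made up for illustration.

	/* Sketch of the 440 matching rule assumed by this patch:
	 * a TLB entry matches when its TID is 0 (global) or equals the PID. */
	#include <stdbool.h>
	#include <stdio.h>

	static bool tlb_entry_matches(unsigned int entry_tid, unsigned int cur_pid)
	{
		return entry_tid == 0 || entry_tid == cur_pid;
	}

	int main(void)
	{
		const unsigned int kernel_tid = 1;	/* shadow TID for guest kernel mappings */
		const unsigned int user_tid = 0;	/* shadow TID for guest user mappings */

		/* Guest privileged mode runs with shadow PID=1: both kinds of entry match. */
		printf("kernel mode: kernel=%d user=%d\n",
		       tlb_entry_matches(kernel_tid, 1), tlb_entry_matches(user_tid, 1));

		/* Guest user mode runs with shadow PID=0: only user entries match,
		 * so kernel mappings are invisible without any TLB flush. */
		printf("user mode:   kernel=%d user=%d\n",
		       tlb_entry_matches(kernel_tid, 0), tlb_entry_matches(user_tid, 0));

		return 0;
	}

Running the sketch prints kernel=1 user=1 for privileged mode and kernel=0 user=1 for user mode, mirroring the asymmetry the commit message describes.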
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	 4
-rw-r--r--	arch/powerpc/include/asm/kvm_ppc.h	 9
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	 2
-rw-r--r--	arch/powerpc/kvm/44x_tlb.c	39
-rw-r--r--	arch/powerpc/kvm/booke_guest.c	 2
-rw-r--r--	arch/powerpc/kvm/booke_interrupts.S	 2
-rw-r--r--	arch/powerpc/kvm/emulate.c	 2
7 files changed, 41 insertions, 19 deletions
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 4338b03da8f9..34b52b7180cd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -129,7 +129,11 @@ struct kvm_vcpu_arch {
 	u32 ivor[16];
 	u32 ivpr;
 	u32 pir;
+
+	u32 shadow_pid;
 	u32 pid;
+	u32 swap_pid;
+
 	u32 pvr;
 	u32 ccr0;
 	u32 ccr1;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 8e7e42959903..8931ba729d2b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -64,6 +64,7 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
 extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                                   gva_t eend, u32 asid);
 extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
+extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
 
 /* XXX Book E specific */
 extern void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i);
@@ -95,4 +96,12 @@ static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 	kvm_vcpu_block(vcpu);
 }
 
+static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
+{
+	if (vcpu->arch.pid != new_pid) {
+		vcpu->arch.pid = new_pid;
+		vcpu->arch.swap_pid = 1;
+	}
+}
+
 #endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 1631d670b9ed..52649da344fb 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -369,7 +369,7 @@ int main(void)
 	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
 	DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
 	DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
-	DEFINE(VCPU_PID, offsetof(struct kvm_vcpu, arch.pid));
+	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
 
 	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
 	DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 06a5fcfc4d33..3594bbd1f618 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -170,7 +170,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 
 	/* XXX what about AS? */
 
-	stlbe->tid = asid & 0xff;
+	stlbe->tid = !(asid & 0xff);
 
 	/* Force TS=1 for all guest mappings. */
 	/* For now we hardcode 4KB mappings, but it will be important to
@@ -190,7 +190,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
 void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
                            gva_t eend, u32 asid)
 {
-	unsigned int pid = asid & 0xff;
+	unsigned int pid = !(asid & 0xff);
 	int i;
 
 	/* XXX Replace loop with fancy data structures. */
@@ -222,23 +222,30 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	up_write(&current->mm->mmap_sem);
 }
 
-/* Invalidate all mappings, so that when they fault back in they will get the
- * proper permission bits. */
+/* Invalidate all mappings on the privilege switch after PID has been changed.
+ * The guest always runs with PID=1, so we must clear the entire TLB when
+ * switching address spaces. */
 void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
 {
 	int i;
 
-	/* XXX Replace loop with fancy data structures. */
-	down_write(&current->mm->mmap_sem);
-	for (i = 0; i <= tlb_44x_hwater; i++) {
-		struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
-
-		kvmppc_44x_shadow_release(vcpu, i);
-		stlbe->word0 = 0;
-		kvmppc_tlbe_set_modified(vcpu, i);
-		KVMTRACE_5D(STLB_INVAL, vcpu, i,
-		            stlbe->tid, stlbe->word0, stlbe->word1,
-		            stlbe->word2, handler);
+	if (vcpu->arch.swap_pid) {
+		/* XXX Replace loop with fancy data structures. */
+		down_write(&current->mm->mmap_sem);
+		for (i = 0; i <= tlb_44x_hwater; i++) {
+			struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+
+			/* Future optimization: clear only userspace mappings. */
+			kvmppc_44x_shadow_release(vcpu, i);
+			stlbe->word0 = 0;
+			kvmppc_tlbe_set_modified(vcpu, i);
+			KVMTRACE_5D(STLB_INVAL, vcpu, i,
+			            stlbe->tid, stlbe->word0, stlbe->word1,
+			            stlbe->word2, handler);
+		}
+		up_write(&current->mm->mmap_sem);
+		vcpu->arch.swap_pid = 0;
 	}
-	up_write(&current->mm->mmap_sem);
+
+	vcpu->arch.shadow_pid = !usermode;
 }
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
index 3cca079975e1..7b2591e26bae 100644
--- a/arch/powerpc/kvm/booke_guest.c
+++ b/arch/powerpc/kvm/booke_guest.c
@@ -486,6 +486,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	vcpu->arch.msr = 0;
 	vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
 
+	vcpu->arch.shadow_pid = 1;
+
 	/* Eye-catching number so we know if the guest takes an interrupt
 	 * before it's programmed its own IVPR. */
 	vcpu->arch.ivpr = 0x55550000;
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 564ea32ecbac..95e165baf85f 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -332,7 +332,7 @@ lightweight_exit:
 
 	mfspr	r3, SPRN_PID
 	stw	r3, VCPU_HOST_PID(r4)
-	lwz	r3, VCPU_PID(r4)
+	lwz	r3, VCPU_SHADOW_PID(r4)
 	mtspr	SPRN_PID, r3
 
 	/* Prevent all asynchronous TLB updates. */
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index c3ed63b22210..0fce4fbdc20d 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -508,7 +508,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	case SPRN_MMUCR:
 		vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
 	case SPRN_PID:
-		vcpu->arch.pid = vcpu->arch.gpr[rs]; break;
+		kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
 	case SPRN_CCR0:
 		vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
 	case SPRN_CCR1: