path: root/arch/powerpc
author	Hollis Blanchard <hollisb@us.ibm.com>	2008-07-25 14:54:51 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-10-15 04:15:16 -0400
commit	20754c2495a791b5b429c0da63394c86ade978e7 (patch)
tree	fbeed7fdab0f91417798aa5e4cea22f15a255275 /arch/powerpc
parent	6a0ab738ef42d87951b3980f61b1f4cbb14d4171 (diff)
KVM: ppc: Stop saving host TLB state
We're saving the host TLB state to memory on every exit, but never using it. Originally I had thought that we'd want to restore host TLB for heavyweight exits, but that could actually hurt when context switching to an unrelated host process (i.e. not qemu).

Since this decreases the performance penalty of all exits, this patch improves guest boot time by about 15%.

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
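For context on the diff below: the deleted assembly walked the 440's software-managed TLB on every guest exit and copied each entry into vcpu memory. A rough C sketch of that removed work, assuming the 440's 64-entry TLB (PPC44x_TLB_SIZE) and using hypothetical tlbre_*/mfspr_mmucr helpers in place of the real tlbre/mfspr instructions:

/* Sketch only: the per-exit work this patch deletes. */
static void save_host_tlb(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < PPC44x_TLB_SIZE; i++) {
		/* On 440, reading the PAGEID word also loads MMUCR
		 * with the entry's translation ID, hence the order. */
		u32 pageid = tlbre_pageid(i);                   /* hypothetical */
		vcpu->arch.host_tlb[i].tid   = mfspr_mmucr();   /* hypothetical */
		vcpu->arch.host_tlb[i].word0 = pageid;
		vcpu->arch.host_tlb[i].word1 = tlbre_xlat(i);   /* hypothetical */
		vcpu->arch.host_tlb[i].word2 = tlbre_attrib(i); /* hypothetical */
	}
}

At four words per entry, that is roughly 256 stores plus the TLB reads on every exit, lightweight or heavyweight, which is where the ~15% boot-time improvement comes from.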
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	|  2
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	|  1
-rw-r--r--	arch/powerpc/kvm/booke_interrupts.S	| 17
3 files changed, 3 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 23bad40b0ea6..dc3a7562bae4 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -81,8 +81,6 @@ struct kvm_vcpu_arch {
 	struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
 	/* Pages which are referenced in the shadow TLB. */
 	struct page *shadow_pages[PPC44x_TLB_SIZE];
-	/* Copy of the host's TLB. */
-	struct tlbe host_tlb[PPC44x_TLB_SIZE];
 
 	u32 host_stack;
 	u32 host_pid;
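Dropping host_tlb also shrinks the vcpu structure itself: assuming PPC44x_TLB_SIZE is 64 and struct tlbe holds four 32-bit words, that is 64 * 16 = 1024 bytes saved per vcpu, on top of the per-exit copying.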
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 92768d3006f7..594064953951 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -356,7 +356,6 @@ int main(void)
 
 	DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
-	DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb));
 	DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
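The asm-offsets.c hunk is needed because booke_interrupts.S cannot evaluate offsetof() itself: DEFINE() plants each offset as a marker in compiler-generated assembly, and the build scrapes those markers into asm-offsets.h as constants the assembler can use. A standalone sketch of the pattern (the struct and symbol names here are illustrative, not the kernel's):

#include <stddef.h>

/* Illustrative stand-in for the fields of struct kvm_vcpu_arch. */
struct vcpu_arch_sketch {
	unsigned int host_stack;
	unsigned int host_pid;
};

/* Emit "->SYM value" markers into the generated assembly. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(VCPU_HOST_PID_SKETCH,
	       offsetof(struct vcpu_arch_sketch, host_pid));
	return 0;
}

Compile with -S only (the marker is not a real instruction, so it will not assemble) and grep the .s output for "->" to see the offset. Deleting DEFINE(VCPU_HOST_TLB, ...) removes exactly the constant the deleted assembly below used to locate arch.host_tlb.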
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 8eaba2613ffd..3e88dfa1dbe4 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -342,26 +342,15 @@ lightweight_exit:
 	andc	r6, r5, r6
 	mtmsr	r6
 
-	/* Save the host's non-pinned TLB mappings, and load the guest mappings
-	 * over them. Leave the host's "pinned" kernel mappings in place. */
-	/* XXX optimization: use generation count to avoid swapping unmodified
-	 * entries. */
+	/* Load the guest mappings, leaving the host's "pinned" kernel mappings
+	 * in place. */
+	/* XXX optimization: load only modified guest entries. */
 	mfspr	r10, SPRN_MMUCR			/* Save host MMUCR. */
 	lis	r8, tlb_44x_hwater@ha
 	lwz	r8, tlb_44x_hwater@l(r8)
-	addi	r3, r4, VCPU_HOST_TLB - 4
 	addi	r9, r4, VCPU_SHADOW_TLB - 4
 	li	r6, 0
 1:
-	/* Save host entry. */
-	tlbre	r7, r6, PPC44x_TLB_PAGEID
-	mfspr	r5, SPRN_MMUCR
-	stwu	r5, 4(r3)
-	stwu	r7, 4(r3)
-	tlbre	r7, r6, PPC44x_TLB_XLAT
-	stwu	r7, 4(r3)
-	tlbre	r7, r6, PPC44x_TLB_ATTRIB
-	stwu	r7, 4(r3)
 	/* Load guest entry. */
 	lwzu	r7, 4(r9)
 	mtspr	SPRN_MMUCR, r7
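What remains is only the guest-load half of the old swap. The tail of the loop is truncated above (presumably it writes the remaining words with tlbwe), but in rough C, again with hypothetical mtspr/tlbwe helpers and with the exact loop bound living in the assembly's comparison against tlb_44x_hwater:

/* Sketch only: the loop that survives the patch. */
static void load_guest_tlb(struct kvm_vcpu *vcpu)
{
	int i;

	/* Entries above tlb_44x_hwater are the host's pinned kernel
	 * mappings and are left alone. */
	for (i = 0; i < tlb_44x_hwater; i++) {
		struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];

		mtspr_mmucr(stlbe->tid);         /* hypothetical */
		tlbwe_pageid(i, stlbe->word0);   /* hypothetical */
		tlbwe_xlat(i, stlbe->word1);     /* hypothetical */
		tlbwe_attrib(i, stlbe->word2);   /* hypothetical */
	}
}

The asymmetry is the point of the patch: guest entries still have to be installed before entering the guest, but the host's non-pinned entries are never restored afterwards, so saving them was pure overhead.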