diff options
author:    Hollis Blanchard <hollisb@us.ibm.com>  2008-07-25 14:54:52 -0400
committer: Avi Kivity <avi@qumranet.com>          2008-10-15 04:15:16 -0400
commit:    83aae4a8098eb8a40a2e9dab3714354182143b4f (patch)
tree:      872381c8aa610e3c1053008e967728f121fa55cb /arch/powerpc/kvm/powerpc.c
parent:    20754c2495a791b5b429c0da63394c86ade978e7 (diff)
KVM: ppc: Write only modified shadow entries into the TLB on exit
Track which TLB entries need to be written, instead of overwriting everything
below the high water mark. Typically only a single guest TLB entry will be
modified in a single exit.
Guest boot time performance improvement: about 15%.
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/powerpc/kvm/powerpc.c')
 arch/powerpc/kvm/powerpc.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index b75607180ddb..90a6fc422b23 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -27,6 +27,7 @@
 #include <asm/cputable.h>
 #include <asm/uaccess.h>
 #include <asm/kvm_ppc.h>
+#include <asm/tlbflush.h>
 
 
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
@@ -307,14 +308,28 @@ static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	int i;
+
 	if (vcpu->guest_debug.enabled)
 		kvmppc_load_guest_debug_registers(vcpu);
+
+	/* Mark every guest entry in the shadow TLB entry modified, so that they
+	 * will all be reloaded on the next vcpu run (instead of being
+	 * demand-faulted). */
+	for (i = 0; i <= tlb_44x_hwater; i++)
+		kvmppc_tlbe_set_modified(vcpu, i);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->guest_debug.enabled)
 		kvmppc_restore_host_debug_state(vcpu);
+
+	/* Don't leave guest TLB entries resident when being de-scheduled. */
+	/* XXX It would be nice to differentiate between heavyweight exit and
+	 * sched_out here, since we could avoid the TLB flush for heavyweight
+	 * exits. */
+	_tlbia();
 }
 
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,