author     Mihai Caraman <mihai.caraman@freescale.com>   2014-06-18 03:15:22 -0400
committer  Alexander Graf <agraf@suse.de>                2014-07-28 09:22:16 -0400
commit     1f0eeb7e1a88f46afa0f435cf7c34b0c84cf2394 (patch)
tree       f1eb80cb27209f2e395bb34cf6d08af7e1af2e79 /arch/powerpc/kvm/e500mc.c
parent     f396df35188c59a5ecb83932190505ef297754e6 (diff)
KVM: PPC: e500mc: Enhance tlb invalidation condition on vcpu schedule
On vcpu schedule, the condition checked for tlb pollution is too loose.
The tlb entries of a vcpu become polluted (as opposed to merely stale) only
when a different vcpu within the same logical partition runs in between.
Optimize the tlb invalidation condition by keeping last_vcpu per logical
partition id.
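Conceptually, the check becomes: flush only if the vcpu migrated to a different
physical CPU, or if another vcpu of the same LPID ran on this CPU in between.
A minimal user-space sketch of that logic (plain C, for illustration only; the
struct, the NR_LPIDS bound and the single-CPU array are simplified stand-ins
for the kernel's kvm_vcpu, KVMPPC_NR_LPIDS and per-CPU data, and load_vcpu is
a hypothetical helper, not a kernel function; the real change is in the hunks
below):

#include <stdbool.h>
#include <stdio.h>

#define NR_LPIDS 64			/* stand-in for KVMPPC_NR_LPIDS */

struct vcpu {
	int lpid;			/* logical partition id */
	int oldpir;			/* physical CPU the vcpu last ran on */
};

/* One slot per LPID, modelling the per-CPU last_vcpu_of_lpid[] array
 * on a single physical CPU. */
static struct vcpu *last_vcpu_of_lpid[NR_LPIDS];

/* Flush only if the vcpu comes from another physical CPU, or if a
 * different vcpu of the same logical partition ran here in between. */
static bool load_vcpu(struct vcpu *vcpu, int this_pir)
{
	bool flush = vcpu->oldpir != this_pir ||
		     last_vcpu_of_lpid[vcpu->lpid] != vcpu;

	if (flush)
		last_vcpu_of_lpid[vcpu->lpid] = vcpu;	/* + tlbil_all in the kernel */
	vcpu->oldpir = this_pir;
	return flush;
}

int main(void)
{
	struct vcpu a = { .lpid = 1, .oldpir = -1 };
	struct vcpu c = { .lpid = 2, .oldpir = -1 };

	printf("%d\n", load_vcpu(&a, 0));	/* 1: first run on this CPU */
	printf("%d\n", load_vcpu(&c, 0));	/* 1: first run of the other LPID */
	printf("%d\n", load_vcpu(&a, 0));	/* 0: c is in another LPID, so a's
						 * entries were not polluted; the old
						 * per-CPU condition would flush here */
	return 0;
}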
With the new invalidation condition, a guest shows a 4% performance improvement
on P5020DS while running a memory stress application with the cpu oversubscribed;
the other guest runs a cpu-intensive workload.
Guest - old invalidation condition
real 3.89
user 3.87
sys 0.01
Guest - enhanced invalidation condition
real 3.75
user 3.73
sys 0.01
Host
real 3.70
user 1.85
sys 0.00
The memory stress application accesses 4KB pages backed by 75% of available
TLB0 entries:
/* ENTRIES and ITERATIONS are compile-time parameters; ENTRIES is sized
 * to cover ~75% of the available TLB0 entries. */
char foo[ENTRIES][4096] __attribute__ ((aligned (4096)));

int main()
{
	char bar;
	int i, j;

	for (i = 0; i < ITERATIONS; i++)
		for (j = 0; j < ENTRIES; j++)
			bar = foo[j][0];

	return 0;
}
Signed-off-by: Mihai Caraman <mihai.caraman@freescale.com>
Reviewed-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kvm/e500mc.c')
-rw-r--r--  arch/powerpc/kvm/e500mc.c | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 17e456279224..690499d7669d 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -110,7 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
 {
 }
 
-static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
+static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
 
 static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
 {
@@ -141,9 +141,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
 	mtspr(SPRN_GESR, vcpu->arch.shared->esr);
 
 	if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
-	    __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
+	    __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) {
 		kvmppc_e500_tlbil_all(vcpu_e500);
-		__get_cpu_var(last_vcpu_on_cpu) = vcpu;
+		__get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
 	}
 
 	kvmppc_load_guest_fp(vcpu);