author      Alexander Graf <agraf@suse.de>            2010-04-19 20:49:48 -0400
committer   Avi Kivity <avi@redhat.com>               2010-05-17 05:18:58 -0400
commit      f7bc74e1c306636a659a04805474b2f8fcbd1f7e (patch)
tree        5aa5c5c9676d577b55bbc700f1d5a6ee5c137a27 /arch/powerpc/kvm/book3s_32_mmu.c
parent      7fdaec997cc8ef77e8da7ed70f3d9f074b61c31f (diff)
KVM: PPC: Improve split mode
When in split mode, instruction relocation and data relocation are not equal.
So far we implemented this mode by reserving a special pseudo-VSID for the
two cases and flushing all PTEs when going into split mode, which is slow.
Unfortunately, 32-bit Linux and Mac OS X use split mode extensively. To avoid
slowing things down too much, I came up with a different idea: mark split
mode with a bit in the VSID and then treat it like any other segment.
This means we can just flush the shadow segment cache, but keep the PTEs
intact. I verified that this works with ppc32 Linux and Mac OS X 10.4
guests and does speed them up.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
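
As a minimal illustration of the tagging idea (not kernel code), the standalone sketch below ORs the same guest VSID with a different tag for real mode, IR-only split mode, and DR-only split mode, mirroring in spirit the switch in kvmppc_mmu_book3s_32_esid_to_vsid() in the diff below. Because each translation context yields a distinct shadow VSID, shadow PTEs built in one mode can never be hit from another, so only the segment cache needs invalidating on a mode switch. The MSR bit positions, the VSID_REAL* tag values, and the context_vsid() helper are illustrative placeholders, not the definitions used by arch/powerpc.

/* Minimal sketch only: hypothetical tag values and MSR bit positions. */
#include <stdio.h>
#include <stdint.h>

#define MSR_IR       (1ULL << 5)     /* placeholder bit positions */
#define MSR_DR       (1ULL << 4)

#define VSID_REAL    (0x1ULL << 60)  /* placeholder context tags */
#define VSID_REAL_IR (0x2ULL << 60)
#define VSID_REAL_DR (0x4ULL << 60)

/* Tag a guest VSID with the current translation context. */
static uint64_t context_vsid(uint64_t msr, uint64_t gvsid)
{
        switch (msr & (MSR_DR | MSR_IR)) {
        case 0:               return VSID_REAL    | gvsid; /* real mode        */
        case MSR_IR:          return VSID_REAL_IR | gvsid; /* split: IR only   */
        case MSR_DR:          return VSID_REAL_DR | gvsid; /* split: DR only   */
        case MSR_DR | MSR_IR: return gvsid;                /* full translation */
        }
        return 0;
}

int main(void)
{
        uint64_t gvsid = 0x123;

        /* The same guest VSID maps to a distinct shadow VSID per context,
         * so translations from one mode never alias another mode's and do
         * not have to be flushed when the guest toggles MSR_IR/MSR_DR. */
        printf("IR only: %#llx\n", (unsigned long long)context_vsid(MSR_IR, gvsid));
        printf("DR only: %#llx\n", (unsigned long long)context_vsid(MSR_DR, gvsid));
        printf("IR|DR:   %#llx\n", (unsigned long long)context_vsid(MSR_DR | MSR_IR, gvsid));
        return 0;
}
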
Diffstat (limited to 'arch/powerpc/kvm/book3s_32_mmu.c')
 -rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 33186b745c90..0b10503c8a4a 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -330,30 +330,35 @@ static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool lar
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
                                              u64 *vsid)
 {
+        ulong ea = esid << SID_SHIFT;
+        struct kvmppc_sr *sr;
+        u64 gvsid = esid;
+
+        if (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+                sr = find_sr(to_book3s(vcpu), ea);
+                if (sr->valid)
+                        gvsid = sr->vsid;
+        }
+
         /* In case we only have one of MSR_IR or MSR_DR set, let's put
            that in the real-mode context (and hope RM doesn't access
            high memory) */
         switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
         case 0:
-                *vsid = (VSID_REAL >> 16) | esid;
+                *vsid = VSID_REAL | esid;
                 break;
         case MSR_IR:
-                *vsid = (VSID_REAL_IR >> 16) | esid;
+                *vsid = VSID_REAL_IR | gvsid;
                 break;
         case MSR_DR:
-                *vsid = (VSID_REAL_DR >> 16) | esid;
+                *vsid = VSID_REAL_DR | gvsid;
                 break;
         case MSR_DR|MSR_IR:
-        {
-                ulong ea = esid << SID_SHIFT;
-                struct kvmppc_sr *sr = find_sr(to_book3s(vcpu), ea);
-
                 if (!sr->valid)
                         return -1;
 
                 *vsid = sr->vsid;
                 break;
-        }
         default:
                 BUG();
         }