diff options
author | Avi Kivity <avi@redhat.com> | 2010-04-14 12:20:03 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2010-05-17 05:17:47 -0400 |
commit | 5b7e0102ae744e9175b905f4267a81393bdb7a75 (patch) | |
tree | 763f8ac81b9ebd0bebe316fd016cf0a98f33b9c7 /arch/x86/kvm/mmu.c | |
parent | e269fb2189fb86d79d64c0ca74c6c1a549ad4aa3 (diff) |
KVM: MMU: Replace role.glevels with role.cr4_pae
There is no real distinction between glevels=3 and glevels=4; both have
exactly the same format and the code is treated exactly the same way. Drop
role.glevels and replace it with role.cr4_pae (which is meaningful). This
simplifies the code a bit.
As a side effect, it allows sharing shadow page tables between pae and
longmode guest page tables at the same guest page.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index ec8900b6692a..51aa580d0aa5 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -1206,7 +1206,7 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp); | |||
1206 | 1206 | ||
1207 | static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | 1207 | static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) |
1208 | { | 1208 | { |
1209 | if (sp->role.glevels != vcpu->arch.mmu.root_level) { | 1209 | if (sp->role.cr4_pae != !!is_pae(vcpu)) { |
1210 | kvm_mmu_zap_page(vcpu->kvm, sp); | 1210 | kvm_mmu_zap_page(vcpu->kvm, sp); |
1211 | return 1; | 1211 | return 1; |
1212 | } | 1212 | } |
@@ -1329,7 +1329,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, | |||
1329 | role.level = level; | 1329 | role.level = level; |
1330 | role.direct = direct; | 1330 | role.direct = direct; |
1331 | if (role.direct) | 1331 | if (role.direct) |
1332 | role.glevels = 0; | 1332 | role.cr4_pae = 0; |
1333 | role.access = access; | 1333 | role.access = access; |
1334 | if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { | 1334 | if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { |
1335 | quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); | 1335 | quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); |
@@ -2443,7 +2443,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu) | |||
2443 | else | 2443 | else |
2444 | r = paging32_init_context(vcpu); | 2444 | r = paging32_init_context(vcpu); |
2445 | 2445 | ||
2446 | vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level; | 2446 | vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu); |
2447 | 2447 | ||
2448 | return r; | 2448 | return r; |
2449 | } | 2449 | } |
@@ -2532,7 +2532,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, | |||
2532 | } | 2532 | } |
2533 | 2533 | ||
2534 | ++vcpu->kvm->stat.mmu_pte_updated; | 2534 | ++vcpu->kvm->stat.mmu_pte_updated; |
2535 | if (sp->role.glevels == PT32_ROOT_LEVEL) | 2535 | if (!sp->role.cr4_pae) |
2536 | paging32_update_pte(vcpu, sp, spte, new); | 2536 | paging32_update_pte(vcpu, sp, spte, new); |
2537 | else | 2537 | else |
2538 | paging64_update_pte(vcpu, sp, spte, new); | 2538 | paging64_update_pte(vcpu, sp, spte, new); |
@@ -2681,7 +2681,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
2681 | hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) { | 2681 | hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) { |
2682 | if (sp->gfn != gfn || sp->role.direct || sp->role.invalid) | 2682 | if (sp->gfn != gfn || sp->role.direct || sp->role.invalid) |
2683 | continue; | 2683 | continue; |
2684 | pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8; | 2684 | pte_size = sp->role.cr4_pae ? 8 : 4; |
2685 | misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); | 2685 | misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); |
2686 | misaligned |= bytes < 4; | 2686 | misaligned |= bytes < 4; |
2687 | if (misaligned || flooded) { | 2687 | if (misaligned || flooded) { |
@@ -2705,7 +2705,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
2705 | page_offset = offset; | 2705 | page_offset = offset; |
2706 | level = sp->role.level; | 2706 | level = sp->role.level; |
2707 | npte = 1; | 2707 | npte = 1; |
2708 | if (sp->role.glevels == PT32_ROOT_LEVEL) { | 2708 | if (!sp->role.cr4_pae) { |
2709 | page_offset <<= 1; /* 32->64 */ | 2709 | page_offset <<= 1; /* 32->64 */ |
2710 | /* | 2710 | /* |
2711 | * A 32-bit pde maps 4MB while the shadow pdes map | 2711 | * A 32-bit pde maps 4MB while the shadow pdes map |