author    Avi Kivity <avi@redhat.com>  2010-04-14 12:20:03 -0400
committer Avi Kivity <avi@redhat.com>  2010-05-17 05:17:47 -0400
commit    5b7e0102ae744e9175b905f4267a81393bdb7a75 (patch)
tree      763f8ac81b9ebd0bebe316fd016cf0a98f33b9c7 /arch/x86
parent    e269fb2189fb86d79d64c0ca74c6c1a549ad4aa3 (diff)
KVM: MMU: Replace role.glevels with role.cr4_pae
There is no real distinction between glevels=3 and glevels=4; both have
exactly the same format and the code treats them exactly the same way.
Drop role.glevels and replace it with role.cr4_pae (which is meaningful).
This simplifies the code a bit.

As a side effect, it allows sharing shadow page tables between pae and
long mode guest page tables at the same guest page.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
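To make the collapse concrete, here is a minimal user-space sketch (illustrative only; the helper names are invented, not kernel code) showing that the guest PTE size, the one format property the shadow code keys on, depends only on whether CR4.PAE is set, so glevels=3 (PAE) and glevels=4 (long mode) never needed to be distinguished:

    #include <stdbool.h>
    #include <stdio.h>

    /* PT32_ROOT_LEVEL is 2 in KVM; glevels 3 and 4 are PAE and long mode. */
    static int pte_size_from_glevels(int glevels)   /* old scheme */
    {
            return glevels == 2 ? 4 : 8;
    }

    static int pte_size_from_cr4_pae(bool cr4_pae)  /* new scheme */
    {
            return cr4_pae ? 8 : 4;
    }

    int main(void)
    {
            for (int g = 2; g <= 4; g++) {
                    bool pae = g > 2;   /* glevels 3 and 4 both imply CR4.PAE */
                    printf("glevels=%d: old pte_size=%d; cr4_pae=%d: new pte_size=%d\n",
                           g, pte_size_from_glevels(g), pae,
                           pte_size_from_cr4_pae(pae));
            }
            return 0;
    }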
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h   2
-rw-r--r--  arch/x86/kvm/mmu.c               12
-rw-r--r--  arch/x86/kvm/mmutrace.h           5
3 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3602728d54de..707d272ae4a1 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -171,8 +171,8 @@ struct kvm_pte_chain {
 union kvm_mmu_page_role {
         unsigned word;
         struct {
-                unsigned glevels:4;
                 unsigned level:4;
+                unsigned cr4_pae:1;
                 unsigned quadrant:2;
                 unsigned pad_for_nice_hex_output:6;
                 unsigned direct:1;
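A side note on the surrounding structure (a simplified sketch with most fields omitted; not the full kernel definition): the role bits live in a union with an unsigned word so that shadow-page lookups can compare every field in one integer comparison, which is why shrinking glevels:4 to cr4_pae:1 also shortens the lookup key's meaningful bits:

    union mmu_page_role_sketch {            /* hypothetical name */
            unsigned word;
            struct {
                    unsigned level:4;
                    unsigned cr4_pae:1;
                    unsigned quadrant:2;
                    /* remaining fields omitted for brevity */
            };
    };

    static int role_matches(union mmu_page_role_sketch a,
                            union mmu_page_role_sketch b)
    {
            return a.word == b.word;        /* one compare covers all bitfields */
    }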
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ec8900b6692a..51aa580d0aa5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1206,7 +1206,7 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 
 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
-        if (sp->role.glevels != vcpu->arch.mmu.root_level) {
+        if (sp->role.cr4_pae != !!is_pae(vcpu)) {
                 kvm_mmu_zap_page(vcpu->kvm, sp);
                 return 1;
         }
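The !!is_pae(vcpu) idiom deserves a note. Assuming is_pae() returns the raw masked CR4.PAE bit rather than a normalized boolean (a plausible reading, but an assumption here), the double negation collapses any nonzero value to 1 so it can be compared against the 1-bit cr4_pae field:

    #define X86_CR4_PAE 0x20UL              /* bit 5 of CR4 */

    static unsigned long fake_cr4 = X86_CR4_PAE;   /* illustrative state */

    static unsigned long is_pae_sketch(void)       /* stands in for is_pae() */
    {
            return fake_cr4 & X86_CR4_PAE;  /* returns 0x20, not 1 */
    }

    static int role_is_stale(unsigned cr4_pae_bit) /* 1-bit field: 0 or 1 */
    {
            /* without !!, comparing 1 against 0x20 would always mismatch */
            return cr4_pae_bit != !!is_pae_sketch();
    }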
@@ -1329,7 +1329,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         role.level = level;
         role.direct = direct;
         if (role.direct)
-                role.glevels = 0;
+                role.cr4_pae = 0;
         role.access = access;
         if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
@@ -2443,7 +2443,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
         else
                 r = paging32_init_context(vcpu);
 
-        vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
+        vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
 
         return r;
 }
@@ -2532,7 +2532,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
         }
 
         ++vcpu->kvm->stat.mmu_pte_updated;
-        if (sp->role.glevels == PT32_ROOT_LEVEL)
+        if (!sp->role.cr4_pae)
                 paging32_update_pte(vcpu, sp, spte, new);
         else
                 paging64_update_pte(vcpu, sp, spte, new);
@@ -2681,7 +2681,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
                 if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
                         continue;
-                pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
+                pte_size = sp->role.cr4_pae ? 8 : 4;
                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
                 misaligned |= bytes < 4;
                 if (misaligned || flooded) {
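The misalignment test above is terse; a small standalone sketch with illustrative values shows how XORing the first and last byte offsets and masking off the low pte_size bits detects a write that straddles two PTEs:

    #include <stdio.h>

    static int misaligned(unsigned offset, unsigned bytes, unsigned pte_size)
    {
            /* high bits differ iff first and last byte fall in different PTEs */
            return ((offset ^ (offset + bytes - 1)) & ~(pte_size - 1)) != 0;
    }

    int main(void)
    {
            printf("%d\n", misaligned(0, 8, 8));    /* 0: within one 8-byte pte */
            printf("%d\n", misaligned(4, 8, 8));    /* 1: straddles two ptes    */
            printf("%d\n", misaligned(4, 4, 4));    /* 0: exactly one 4-byte pte */
            return 0;
    }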
@@ -2705,7 +2705,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                 page_offset = offset;
                 level = sp->role.level;
                 npte = 1;
-                if (sp->role.glevels == PT32_ROOT_LEVEL) {
+                if (!sp->role.cr4_pae) {
                         page_offset <<= 1;      /* 32->64 */
                         /*
                          * A 32-bit pde maps 4MB while the shadow pdes map
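For context on the page_offset <<= 1 above (a sketch of the general idea, not taken from this patch): a non-PAE guest PTE is 4 bytes but its shadow PTE is 8 bytes, so a byte offset into the guest table doubles when used to index the shadow table:

    /* Illustrative helper, not kernel code: the PTE index is preserved,
     * only the entry width grows from 4 to 8 bytes. */
    static unsigned shadow_offset(unsigned guest_offset)
    {
            return (guest_offset / 4) * 8;  /* same as guest_offset << 1
                                               for 4-aligned offsets */
    }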
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 1fe956ab7617..3851f1f3030c 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -28,9 +28,10 @@
                                                 \
         role.word = __entry->role;                              \
                                                 \
-        trace_seq_printf(p, "sp gfn %llx %u/%u q%u%s %s%s %spge"        \
+        trace_seq_printf(p, "sp gfn %llx %u%s q%u%s %s%s %spge"         \
                          " %snxe root %u %s%c",                 \
-                         __entry->gfn, role.level, role.glevels,        \
+                         __entry->gfn, role.level,                      \
+                         role.cr4_pae ? " pae" : "",                    \
                          role.quadrant,                         \
                          role.direct ? " direct" : "",                  \
                          access_str[role.access],                       \
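Reading the two format strings, the visible effect on a trace line is that the old level/glevels pair becomes the level plus an optional " pae" suffix. A small sketch with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
            unsigned level = 4, glevels = 4, quadrant = 0, cr4_pae = 1;

            /* before: "%u/%u" printed level and glevels */
            printf("sp gfn abcd %u/%u q%u ...\n", level, glevels, quadrant);
            /* after: glevels is gone; PAE shows up as a " pae" suffix */
            printf("sp gfn abcd %u%s q%u ...\n", level,
                   cr4_pae ? " pae" : "", quadrant);
            return 0;
    }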