diff options
author    Avi Kivity <avi@redhat.com>   2009-01-11 06:02:10 -0500
committer Avi Kivity <avi@redhat.com>   2009-03-24 05:03:04 -0400
commit    f6e2c02b6d28ddabe99377c5640a833407a62632 (patch)
tree      57c435d07989069fd315afe48153a07ebc895f99 /arch/x86/kvm/mmu.c
parent    9903a927a4aea4b1ea42356a8453fca664df0b18 (diff)
KVM: MMU: Rename "metaphysical" attribute to "direct"
This actually describes what is going on, rather than alerting the reader
that something strange is going on.
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 32 |
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index de9a9fbc16ed..ef060ec444a4 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -1066,7 +1066,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn) | |||
1066 | index = kvm_page_table_hashfn(gfn); | 1066 | index = kvm_page_table_hashfn(gfn); |
1067 | bucket = &kvm->arch.mmu_page_hash[index]; | 1067 | bucket = &kvm->arch.mmu_page_hash[index]; |
1068 | hlist_for_each_entry(sp, node, bucket, hash_link) | 1068 | hlist_for_each_entry(sp, node, bucket, hash_link) |
1069 | if (sp->gfn == gfn && !sp->role.metaphysical | 1069 | if (sp->gfn == gfn && !sp->role.direct |
1070 | && !sp->role.invalid) { | 1070 | && !sp->role.invalid) { |
1071 | pgprintk("%s: found role %x\n", | 1071 | pgprintk("%s: found role %x\n", |
1072 | __func__, sp->role.word); | 1072 | __func__, sp->role.word); |
@@ -1200,7 +1200,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, | |||
1200 | gfn_t gfn, | 1200 | gfn_t gfn, |
1201 | gva_t gaddr, | 1201 | gva_t gaddr, |
1202 | unsigned level, | 1202 | unsigned level, |
1203 | int metaphysical, | 1203 | int direct, |
1204 | unsigned access, | 1204 | unsigned access, |
1205 | u64 *parent_pte) | 1205 | u64 *parent_pte) |
1206 | { | 1206 | { |
@@ -1213,7 +1213,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, | |||
1213 | 1213 | ||
1214 | role = vcpu->arch.mmu.base_role; | 1214 | role = vcpu->arch.mmu.base_role; |
1215 | role.level = level; | 1215 | role.level = level; |
1216 | role.metaphysical = metaphysical; | 1216 | role.direct = direct; |
1217 | role.access = access; | 1217 | role.access = access; |
1218 | if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { | 1218 | if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { |
1219 | quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); | 1219 | quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level)); |
@@ -1250,7 +1250,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, | |||
1250 | sp->role = role; | 1250 | sp->role = role; |
1251 | sp->global = role.cr4_pge; | 1251 | sp->global = role.cr4_pge; |
1252 | hlist_add_head(&sp->hash_link, bucket); | 1252 | hlist_add_head(&sp->hash_link, bucket); |
1253 | if (!metaphysical) { | 1253 | if (!direct) { |
1254 | if (rmap_write_protect(vcpu->kvm, gfn)) | 1254 | if (rmap_write_protect(vcpu->kvm, gfn)) |
1255 | kvm_flush_remote_tlbs(vcpu->kvm); | 1255 | kvm_flush_remote_tlbs(vcpu->kvm); |
1256 | account_shadowed(vcpu->kvm, gfn); | 1256 | account_shadowed(vcpu->kvm, gfn); |
@@ -1395,7 +1395,7 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp) | |||
1395 | kvm_mmu_page_unlink_children(kvm, sp); | 1395 | kvm_mmu_page_unlink_children(kvm, sp); |
1396 | kvm_mmu_unlink_parents(kvm, sp); | 1396 | kvm_mmu_unlink_parents(kvm, sp); |
1397 | kvm_flush_remote_tlbs(kvm); | 1397 | kvm_flush_remote_tlbs(kvm); |
1398 | if (!sp->role.invalid && !sp->role.metaphysical) | 1398 | if (!sp->role.invalid && !sp->role.direct) |
1399 | unaccount_shadowed(kvm, sp->gfn); | 1399 | unaccount_shadowed(kvm, sp->gfn); |
1400 | if (sp->unsync) | 1400 | if (sp->unsync) |
1401 | kvm_unlink_unsync_page(kvm, sp); | 1401 | kvm_unlink_unsync_page(kvm, sp); |
@@ -1458,7 +1458,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) | |||
1458 | index = kvm_page_table_hashfn(gfn); | 1458 | index = kvm_page_table_hashfn(gfn); |
1459 | bucket = &kvm->arch.mmu_page_hash[index]; | 1459 | bucket = &kvm->arch.mmu_page_hash[index]; |
1460 | hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) | 1460 | hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) |
1461 | if (sp->gfn == gfn && !sp->role.metaphysical) { | 1461 | if (sp->gfn == gfn && !sp->role.direct) { |
1462 | pgprintk("%s: gfn %lx role %x\n", __func__, gfn, | 1462 | pgprintk("%s: gfn %lx role %x\n", __func__, gfn, |
1463 | sp->role.word); | 1463 | sp->role.word); |
1464 | r = 1; | 1464 | r = 1; |
@@ -1478,7 +1478,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn) | |||
1478 | index = kvm_page_table_hashfn(gfn); | 1478 | index = kvm_page_table_hashfn(gfn); |
1479 | bucket = &kvm->arch.mmu_page_hash[index]; | 1479 | bucket = &kvm->arch.mmu_page_hash[index]; |
1480 | hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) { | 1480 | hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) { |
1481 | if (sp->gfn == gfn && !sp->role.metaphysical | 1481 | if (sp->gfn == gfn && !sp->role.direct |
1482 | && !sp->role.invalid) { | 1482 | && !sp->role.invalid) { |
1483 | pgprintk("%s: zap %lx %x\n", | 1483 | pgprintk("%s: zap %lx %x\n", |
1484 | __func__, gfn, sp->role.word); | 1484 | __func__, gfn, sp->role.word); |
@@ -1638,7 +1638,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) | |||
1638 | bucket = &vcpu->kvm->arch.mmu_page_hash[index]; | 1638 | bucket = &vcpu->kvm->arch.mmu_page_hash[index]; |
1639 | /* don't unsync if pagetable is shadowed with multiple roles */ | 1639 | /* don't unsync if pagetable is shadowed with multiple roles */ |
1640 | hlist_for_each_entry_safe(s, node, n, bucket, hash_link) { | 1640 | hlist_for_each_entry_safe(s, node, n, bucket, hash_link) { |
1641 | if (s->gfn != sp->gfn || s->role.metaphysical) | 1641 | if (s->gfn != sp->gfn || s->role.direct) |
1642 | continue; | 1642 | continue; |
1643 | if (s->role.word != sp->role.word) | 1643 | if (s->role.word != sp->role.word) |
1644 | return 1; | 1644 | return 1; |
@@ -1951,7 +1951,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) | |||
1951 | int i; | 1951 | int i; |
1952 | gfn_t root_gfn; | 1952 | gfn_t root_gfn; |
1953 | struct kvm_mmu_page *sp; | 1953 | struct kvm_mmu_page *sp; |
1954 | int metaphysical = 0; | 1954 | int direct = 0; |
1955 | 1955 | ||
1956 | root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT; | 1956 | root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT; |
1957 | 1957 | ||
@@ -1960,18 +1960,18 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) | |||
1960 | 1960 | ||
1961 | ASSERT(!VALID_PAGE(root)); | 1961 | ASSERT(!VALID_PAGE(root)); |
1962 | if (tdp_enabled) | 1962 | if (tdp_enabled) |
1963 | metaphysical = 1; | 1963 | direct = 1; |
1964 | sp = kvm_mmu_get_page(vcpu, root_gfn, 0, | 1964 | sp = kvm_mmu_get_page(vcpu, root_gfn, 0, |
1965 | PT64_ROOT_LEVEL, metaphysical, | 1965 | PT64_ROOT_LEVEL, direct, |
1966 | ACC_ALL, NULL); | 1966 | ACC_ALL, NULL); |
1967 | root = __pa(sp->spt); | 1967 | root = __pa(sp->spt); |
1968 | ++sp->root_count; | 1968 | ++sp->root_count; |
1969 | vcpu->arch.mmu.root_hpa = root; | 1969 | vcpu->arch.mmu.root_hpa = root; |
1970 | return; | 1970 | return; |
1971 | } | 1971 | } |
1972 | metaphysical = !is_paging(vcpu); | 1972 | direct = !is_paging(vcpu); |
1973 | if (tdp_enabled) | 1973 | if (tdp_enabled) |
1974 | metaphysical = 1; | 1974 | direct = 1; |
1975 | for (i = 0; i < 4; ++i) { | 1975 | for (i = 0; i < 4; ++i) { |
1976 | hpa_t root = vcpu->arch.mmu.pae_root[i]; | 1976 | hpa_t root = vcpu->arch.mmu.pae_root[i]; |
1977 | 1977 | ||
@@ -1985,7 +1985,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu) | |||
1985 | } else if (vcpu->arch.mmu.root_level == 0) | 1985 | } else if (vcpu->arch.mmu.root_level == 0) |
1986 | root_gfn = 0; | 1986 | root_gfn = 0; |
1987 | sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, | 1987 | sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, |
1988 | PT32_ROOT_LEVEL, metaphysical, | 1988 | PT32_ROOT_LEVEL, direct, |
1989 | ACC_ALL, NULL); | 1989 | ACC_ALL, NULL); |
1990 | root = __pa(sp->spt); | 1990 | root = __pa(sp->spt); |
1991 | ++sp->root_count; | 1991 | ++sp->root_count; |
@@ -2487,7 +2487,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
2487 | index = kvm_page_table_hashfn(gfn); | 2487 | index = kvm_page_table_hashfn(gfn); |
2488 | bucket = &vcpu->kvm->arch.mmu_page_hash[index]; | 2488 | bucket = &vcpu->kvm->arch.mmu_page_hash[index]; |
2489 | hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) { | 2489 | hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) { |
2490 | if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid) | 2490 | if (sp->gfn != gfn || sp->role.direct || sp->role.invalid) |
2491 | continue; | 2491 | continue; |
2492 | pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8; | 2492 | pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8; |
2493 | misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); | 2493 | misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1); |
@@ -3125,7 +3125,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu) | |||
3125 | gfn_t gfn; | 3125 | gfn_t gfn; |
3126 | 3126 | ||
3127 | list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) { | 3127 | list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) { |
3128 | if (sp->role.metaphysical) | 3128 | if (sp->role.direct) |
3129 | continue; | 3129 | continue; |
3130 | 3130 | ||
3131 | gfn = unalias_gfn(vcpu->kvm, sp->gfn); | 3131 | gfn = unalias_gfn(vcpu->kvm, sp->gfn); |