author		Avi Kivity <avi@redhat.com>	2009-01-11 06:02:10 -0500
committer	Avi Kivity <avi@redhat.com>	2009-03-24 05:03:04 -0400
commit		f6e2c02b6d28ddabe99377c5640a833407a62632 (patch)
tree		57c435d07989069fd315afe48153a07ebc895f99 /arch
parent		9903a927a4aea4b1ea42356a8453fca664df0b18 (diff)
KVM: MMU: Rename "metaphysical" attribute to "direct"
This actually describes what is going on, rather than alerting the reader
that something strange is going on.
Signed-off-by: Avi Kivity <avi@redhat.com>
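For orientation before the diff: the renamed bit lives in the packed kvm_mmu_page_role word that keys shadow pages in the MMU hash. A condensed sketch assembled from the hunks below, not the full definition (fields above level, such as glevels, are elided; see kvm_host.h for the complete layout):

	union kvm_mmu_page_role {
		unsigned word;
		struct {
			/* ... glevels and friends elided ... */
			unsigned level:4;
			unsigned quadrant:2;
			unsigned pad_for_nice_hex_output:6;
			unsigned direct:1;	/* was "metaphysical": a direct
						 * mapping at gfn, not a shadow of
						 * a guest page table (real mode,
						 * two-dimensional paging, huge
						 * pages) */
			unsigned access:3;
			unsigned invalid:1;
			unsigned cr4_pge:1;
		};
	};

	/* Typical consumer, mirroring the mmu.c hunks: a direct page shadows
	 * no guest page table at gfn, so hash lookups and write-protect
	 * accounting skip it. */
	if (sp->gfn == gfn && !sp->role.direct && !sp->role.invalid)
		pgprintk("%s: found role %x\n", __func__, sp->role.word);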
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/kvm_host.h	 5
-rw-r--r--	arch/x86/kvm/mmu.c	32
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	12
3 files changed, 25 insertions, 24 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 863ea73431ad..55fd4c5fd388 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -170,7 +170,8 @@ struct kvm_pte_chain {
  * bits 0:3 - total guest paging levels (2-4, or zero for real mode)
  * bits 4:7 - page table level for this shadow (1-4)
  * bits 8:9 - page table quadrant for 2-level guests
- * bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
+ * bit   16 - direct mapping of virtual to physical mapping at gfn
+ *            used for real mode and two-dimensional paging
  * bits 17:19 - common access permissions for all ptes in this shadow page
  */
 union kvm_mmu_page_role {
@@ -180,7 +181,7 @@ union kvm_mmu_page_role {
 		unsigned level:4;
 		unsigned quadrant:2;
 		unsigned pad_for_nice_hex_output:6;
-		unsigned metaphysical:1;
+		unsigned direct:1;
 		unsigned access:3;
 		unsigned invalid:1;
 		unsigned cr4_pge:1;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index de9a9fbc16ed..ef060ec444a4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1066,7 +1066,7 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
-		if (sp->gfn == gfn && !sp->role.metaphysical
+		if (sp->gfn == gfn && !sp->role.direct
 		    && !sp->role.invalid) {
 			pgprintk("%s: found role %x\n",
 				 __func__, sp->role.word);
@@ -1200,7 +1200,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 					     gfn_t gfn,
 					     gva_t gaddr,
 					     unsigned level,
-					     int metaphysical,
+					     int direct,
 					     unsigned access,
 					     u64 *parent_pte)
 {
@@ -1213,7 +1213,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 
 	role = vcpu->arch.mmu.base_role;
 	role.level = level;
-	role.metaphysical = metaphysical;
+	role.direct = direct;
 	role.access = access;
 	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
 		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
@@ -1250,7 +1250,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	sp->role = role;
 	sp->global = role.cr4_pge;
 	hlist_add_head(&sp->hash_link, bucket);
-	if (!metaphysical) {
+	if (!direct) {
 		if (rmap_write_protect(vcpu->kvm, gfn))
 			kvm_flush_remote_tlbs(vcpu->kvm);
 		account_shadowed(vcpu->kvm, gfn);
@@ -1395,7 +1395,7 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	kvm_mmu_page_unlink_children(kvm, sp);
 	kvm_mmu_unlink_parents(kvm, sp);
 	kvm_flush_remote_tlbs(kvm);
-	if (!sp->role.invalid && !sp->role.metaphysical)
+	if (!sp->role.invalid && !sp->role.direct)
 		unaccount_shadowed(kvm, sp->gfn);
 	if (sp->unsync)
 		kvm_unlink_unsync_page(kvm, sp);
@@ -1458,7 +1458,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
-		if (sp->gfn == gfn && !sp->role.metaphysical) {
+		if (sp->gfn == gfn && !sp->role.direct) {
 			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
 				 sp->role.word);
 			r = 1;
@@ -1478,7 +1478,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
-		if (sp->gfn == gfn && !sp->role.metaphysical
+		if (sp->gfn == gfn && !sp->role.direct
 		    && !sp->role.invalid) {
 			pgprintk("%s: zap %lx %x\n",
 				 __func__, gfn, sp->role.word);
@@ -1638,7 +1638,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	/* don't unsync if pagetable is shadowed with multiple roles */
 	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
-		if (s->gfn != sp->gfn || s->role.metaphysical)
+		if (s->gfn != sp->gfn || s->role.direct)
 			continue;
 		if (s->role.word != sp->role.word)
 			return 1;
@@ -1951,7 +1951,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 	int i;
 	gfn_t root_gfn;
 	struct kvm_mmu_page *sp;
-	int metaphysical = 0;
+	int direct = 0;
 
 	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
@@ -1960,18 +1960,18 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
 		ASSERT(!VALID_PAGE(root));
 		if (tdp_enabled)
-			metaphysical = 1;
+			direct = 1;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-				      PT64_ROOT_LEVEL, metaphysical,
+				      PT64_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
 		vcpu->arch.mmu.root_hpa = root;
 		return;
 	}
-	metaphysical = !is_paging(vcpu);
+	direct = !is_paging(vcpu);
 	if (tdp_enabled)
-		metaphysical = 1;
+		direct = 1;
 	for (i = 0; i < 4; ++i) {
 		hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -1985,7 +1985,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		} else if (vcpu->arch.mmu.root_level == 0)
 			root_gfn = 0;
 		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
-				      PT32_ROOT_LEVEL, metaphysical,
+				      PT32_ROOT_LEVEL, direct,
 				      ACC_ALL, NULL);
 		root = __pa(sp->spt);
 		++sp->root_count;
@@ -2487,7 +2487,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
-		if (sp->gfn != gfn || sp->role.metaphysical || sp->role.invalid)
+		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
 			continue;
 		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
@@ -3125,7 +3125,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 	gfn_t gfn;
 
 	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
-		if (sp->role.metaphysical)
+		if (sp->role.direct)
 			continue;
 
 		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 46b68f941f60..7314c0944c5f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -277,7 +277,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *shadow_page;
 	u64 spte, *sptep;
-	int metaphysical;
+	int direct;
 	gfn_t table_gfn;
 	int r;
 	int level;
@@ -313,17 +313,17 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
 		if (level == PT_DIRECTORY_LEVEL
 		    && gw->level == PT_DIRECTORY_LEVEL) {
-			metaphysical = 1;
+			direct = 1;
 			if (!is_dirty_pte(gw->ptes[level - 1]))
 				access &= ~ACC_WRITE_MASK;
 			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
 		} else {
-			metaphysical = 0;
+			direct = 0;
 			table_gfn = gw->table_gfn[level - 2];
 		}
 		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
-					       metaphysical, access, sptep);
-		if (!metaphysical) {
+					       direct, access, sptep);
+		if (!direct) {
 			r = kvm_read_guest_atomic(vcpu->kvm,
 						  gw->pte_gpa[level - 2],
 						  &curr_pte, sizeof(curr_pte));
@@ -512,7 +512,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 	pt_element_t pt[256 / sizeof(pt_element_t)];
 	gpa_t pte_gpa;
 
-	if (sp->role.metaphysical
+	if (sp->role.direct
 	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
 		nonpaging_prefetch_page(vcpu, sp);
 		return;