author		Avi Kivity <avi@redhat.com>	2009-06-10 07:12:05 -0400
committer	Avi Kivity <avi@redhat.com>	2009-09-10 01:32:51 -0400
commit		43a3795a3a12425de31e25ce0ebc3bb41501cef7 (patch)
tree		93481c7731b8c81ad7066d0f1e2202e767d3bd35 /arch/x86/kvm/mmu.c
parent		439e218a6f4716da484314fc5a1f0a59b0212c01 (diff)
KVM: MMU: Adjust pte accessors to explicitly indicate guest or shadow pte

Since the guest and host ptes can have wildly different formats, adjust
the pte accessor names to indicate which type of pte they operate on.
No functional changes.

Signed-off-by: Avi Kivity <avi@redhat.com>
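
For readers skimming the diff below: after this rename, helpers suffixed
_gpte() operate on guest page-table entries, whose width and layout follow
the guest's paging mode, while helpers suffixed _spte() operate on shadow
ptes, which are always host-format u64 values. A minimal standalone sketch
of the convention follows; the typedef, the mask values, main() and the
body of is_shadow_present_pte() are simplified stand-ins, not the kernel's
definitions, and only the two accessor names and signatures come from the
patch itself:

	#include <stdint.h>

	typedef uint64_t u64;			/* stand-in for the kernel typedef */

	#define PT_PRESENT_MASK	(1ULL << 0)	/* simplified stand-in masks; the */
	#define PT_DIRTY_MASK	(1ULL << 6)	/* kernel defines these in mmu.c  */

	/* _gpte accessor: inspects a guest pte, whose layout follows the
	 * guest's paging mode (32-bit, PAE or long mode). */
	static int is_dirty_gpte(unsigned long pte)
	{
		return pte & PT_DIRTY_MASK;
	}

	/* Simplified stand-in for the real is_shadow_present_pte() in mmu.c. */
	static int is_shadow_present_pte(u64 pte)
	{
		return pte & PT_PRESENT_MASK;
	}

	/* _spte accessor: inspects a shadow pte, always a host-format u64. */
	static int is_rmap_spte(u64 pte)
	{
		return is_shadow_present_pte(pte);
	}

	int main(void)
	{
		u64 spte = PT_PRESENT_MASK | PT_DIRTY_MASK;

		/* Exercise both flavors once; exit 0 on the expected result. */
		return is_rmap_spte(spte) && is_dirty_gpte((unsigned long)spte) ? 0 : 1;
	}

Encoding the pte flavor in the function name makes a mismatched call site
(say, passing a shadow pte to a guest-format check) stand out in review,
which is the point of the rename.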
Diffstat (limited to 'arch/x86/kvm/mmu.c')
 arch/x86/kvm/mmu.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8f2cb29db2fe..a039e6bc21f7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -240,12 +240,12 @@ static int is_writeble_pte(unsigned long pte)
 	return pte & PT_WRITABLE_MASK;
 }
 
-static int is_dirty_pte(unsigned long pte)
+static int is_dirty_gpte(unsigned long pte)
 {
 	return pte & PT_DIRTY_MASK;
 }
 
-static int is_rmap_pte(u64 pte)
+static int is_rmap_spte(u64 pte)
 {
 	return is_shadow_present_pte(pte);
 }
@@ -502,7 +502,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 	unsigned long *rmapp;
 	int i, count = 0;
 
-	if (!is_rmap_pte(*spte))
+	if (!is_rmap_spte(*spte))
 		return count;
 	gfn = unalias_gfn(vcpu->kvm, gfn);
 	sp = page_header(__pa(spte));
@@ -567,7 +567,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	unsigned long *rmapp;
 	int i;
 
-	if (!is_rmap_pte(*spte))
+	if (!is_rmap_spte(*spte))
 		return;
 	sp = page_header(__pa(spte));
 	pfn = spte_to_pfn(*spte);
@@ -1769,7 +1769,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		 __func__, *shadow_pte, pt_access,
 		 write_fault, user_fault, gfn);
 
-	if (is_rmap_pte(*shadow_pte)) {
+	if (is_rmap_spte(*shadow_pte)) {
 		/*
 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
 		 * the parent of the now unreachable PTE.
@@ -1805,7 +1805,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
 	if (!was_rmapped) {
 		rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
-		if (!is_rmap_pte(*shadow_pte))
+		if (!is_rmap_spte(*shadow_pte))
 			kvm_release_pfn_clean(pfn);
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, gfn, largepage);
@@ -1984,7 +1984,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		ASSERT(!VALID_PAGE(root));
 		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
 			pdptr = kvm_pdptr_read(vcpu, i);
-			if (!is_present_pte(pdptr)) {
+			if (!is_present_gpte(pdptr)) {
 				vcpu->arch.mmu.pae_root[i] = 0;
 				continue;
 			}
@@ -2475,7 +2475,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		if ((bytes == 4) && (gpa % 4 == 0))
 			memcpy((void *)&gpte, new, 4);
 	}
-	if (!is_present_pte(gpte))
+	if (!is_present_gpte(gpte))
 		return;
 	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 