author    Avi Kivity <avi@redhat.com>    2009-06-10 07:12:05 -0400
committer Avi Kivity <avi@redhat.com>    2009-09-10 01:32:51 -0400
commit    43a3795a3a12425de31e25ce0ebc3bb41501cef7 (patch)
tree      93481c7731b8c81ad7066d0f1e2202e767d3bd35 /arch
parent    439e218a6f4716da484314fc5a1f0a59b0212c01 (diff)
KVM: MMU: Adjust pte accessors to explicitly indicate guest or shadow pte

Since the guest and host ptes can have wildly different formats, adjust
the pte accessor names to indicate on which type of pte they operate.
No functional changes.

Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/mmu.c          16
-rw-r--r--  arch/x86/kvm/mmu.h           2
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  22
-rw-r--r--  arch/x86/kvm/x86.c           2
4 files changed, 21 insertions(+), 21 deletions(-)
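
For orientation before the per-file hunks: the rename separates accessors that inspect guest ptes (the "gpte" helpers, whose layout the guest defines) from accessors that inspect shadow ptes (the "spte" helpers, whose layout the host defines). Below is a minimal standalone sketch of that naming convention, not verbatim kernel code; the mask values follow the x86 page-table bit layout, and the is_shadow_present_pte() body is a simplified stand-in for the real check.

/*
 * Sketch of the gpte/spte naming convention this patch adopts.
 * Simplified reconstruction for illustration; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK  (1ULL << 0)   /* bit 0: present */
#define PT_DIRTY_MASK    (1ULL << 6)   /* bit 6: dirty */

/* "gpte" helpers inspect guest page-table entries (guest-defined format). */
static int is_present_gpte(unsigned long gpte)
{
        return (gpte & PT_PRESENT_MASK) != 0;
}

static int is_dirty_gpte(unsigned long gpte)
{
        return (gpte & PT_DIRTY_MASK) != 0;
}

/* "spte" helpers inspect shadow page-table entries (host-defined format). */
static int is_shadow_present_pte(uint64_t spte)
{
        /* Stand-in body: the real check also excludes marker values. */
        return (spte & PT_PRESENT_MASK) != 0;
}

static int is_rmap_spte(uint64_t spte)
{
        return is_shadow_present_pte(spte);
}

int main(void)
{
        unsigned long gpte = PT_PRESENT_MASK | PT_DIRTY_MASK;
        uint64_t spte = PT_PRESENT_MASK;

        printf("gpte present=%d dirty=%d\n",
               is_present_gpte(gpte), is_dirty_gpte(gpte));
        printf("spte rmap-eligible=%d\n", is_rmap_spte(spte));
        return 0;
}

The point of the sketch is the naming discipline, not the bodies: with two incompatible pte formats in play, the suffix makes it impossible to call a guest-format predicate on a shadow entry without it being visible at the call site.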
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8f2cb29db2fe..a039e6bc21f7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -240,12 +240,12 @@ static int is_writeble_pte(unsigned long pte)
 	return pte & PT_WRITABLE_MASK;
 }
 
-static int is_dirty_pte(unsigned long pte)
+static int is_dirty_gpte(unsigned long pte)
 {
 	return pte & PT_DIRTY_MASK;
 }
 
-static int is_rmap_pte(u64 pte)
+static int is_rmap_spte(u64 pte)
 {
 	return is_shadow_present_pte(pte);
 }
@@ -502,7 +502,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 	unsigned long *rmapp;
 	int i, count = 0;
 
-	if (!is_rmap_pte(*spte))
+	if (!is_rmap_spte(*spte))
 		return count;
 	gfn = unalias_gfn(vcpu->kvm, gfn);
 	sp = page_header(__pa(spte));
@@ -567,7 +567,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	unsigned long *rmapp;
 	int i;
 
-	if (!is_rmap_pte(*spte))
+	if (!is_rmap_spte(*spte))
 		return;
 	sp = page_header(__pa(spte));
 	pfn = spte_to_pfn(*spte);
@@ -1769,7 +1769,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		 __func__, *shadow_pte, pt_access,
 		 write_fault, user_fault, gfn);
 
-	if (is_rmap_pte(*shadow_pte)) {
+	if (is_rmap_spte(*shadow_pte)) {
 		/*
 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
 		 * the parent of the now unreachable PTE.
@@ -1805,7 +1805,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
 	if (!was_rmapped) {
 		rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
-		if (!is_rmap_pte(*shadow_pte))
+		if (!is_rmap_spte(*shadow_pte))
 			kvm_release_pfn_clean(pfn);
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, gfn, largepage);
@@ -1984,7 +1984,7 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		ASSERT(!VALID_PAGE(root));
 		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
 			pdptr = kvm_pdptr_read(vcpu, i);
-			if (!is_present_pte(pdptr)) {
+			if (!is_present_gpte(pdptr)) {
 				vcpu->arch.mmu.pae_root[i] = 0;
 				continue;
 			}
@@ -2475,7 +2475,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		if ((bytes == 4) && (gpa % 4 == 0))
 			memcpy((void *)&gpte, new, 4);
 	}
-	if (!is_present_pte(gpte))
+	if (!is_present_gpte(gpte))
 		return;
 	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 3494a2fb136e..016bf7183e9f 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -75,7 +75,7 @@ static inline int is_paging(struct kvm_vcpu *vcpu)
 	return vcpu->arch.cr0 & X86_CR0_PG;
 }
 
-static inline int is_present_pte(unsigned long pte)
+static inline int is_present_gpte(unsigned long pte)
 {
 	return pte & PT_PRESENT_MASK;
 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4cb1dbfd7c2a..238a193bbf5b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -132,7 +132,7 @@ walk:
 #if PTTYPE == 64
 	if (!is_long_mode(vcpu)) {
 		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
-		if (!is_present_pte(pte))
+		if (!is_present_gpte(pte))
 			goto not_present;
 		--walker->level;
 	}
@@ -155,7 +155,7 @@ walk:
 
 		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
 
-		if (!is_present_pte(pte))
+		if (!is_present_gpte(pte))
 			goto not_present;
 
 		rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
@@ -205,7 +205,7 @@ walk:
 		--walker->level;
 	}
 
-	if (write_fault && !is_dirty_pte(pte)) {
+	if (write_fault && !is_dirty_gpte(pte)) {
 		bool ret;
 
 		mark_page_dirty(vcpu->kvm, table_gfn);
@@ -252,7 +252,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 
 	gpte = *(const pt_element_t *)pte;
 	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
-		if (!is_present_pte(gpte))
+		if (!is_present_gpte(gpte))
 			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
 		return;
 	}
@@ -289,7 +289,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	pt_element_t curr_pte;
 	struct kvm_shadow_walk_iterator iterator;
 
-	if (!is_present_pte(gw->ptes[gw->level - 1]))
+	if (!is_present_gpte(gw->ptes[gw->level - 1]))
 		return NULL;
 
 	for_each_shadow_entry(vcpu, addr, iterator) {
@@ -318,7 +318,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		if (level == PT_DIRECTORY_LEVEL
 		    && gw->level == PT_DIRECTORY_LEVEL) {
 			direct = 1;
-			if (!is_dirty_pte(gw->ptes[level - 1]))
+			if (!is_dirty_gpte(gw->ptes[level - 1]))
 				access &= ~ACC_WRITE_MASK;
 			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
 		} else {
@@ -489,7 +489,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
 				  sizeof(pt_element_t)))
 		return;
-	if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
+	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
 		if (mmu_topup_memory_caches(vcpu))
 			return;
 		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
@@ -536,7 +536,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
 		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
 		for (j = 0; j < ARRAY_SIZE(pt); ++j)
-			if (r || is_present_pte(pt[j]))
+			if (r || is_present_gpte(pt[j]))
 				sp->spt[i+j] = shadow_trap_nonpresent_pte;
 			else
 				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
@@ -574,12 +574,12 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 				  sizeof(pt_element_t)))
 			return -EINVAL;
 
-		if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
+		if (gpte_to_gfn(gpte) != gfn || !is_present_gpte(gpte) ||
 		    !(gpte & PT_ACCESSED_MASK)) {
 			u64 nonpresent;
 
 			rmap_remove(vcpu->kvm, &sp->spt[i]);
-			if (is_present_pte(gpte))
+			if (is_present_gpte(gpte))
 				nonpresent = shadow_trap_nonpresent_pte;
 			else
 				nonpresent = shadow_notrap_nonpresent_pte;
@@ -590,7 +590,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		nr_present++;
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
 		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
-			 is_dirty_pte(gpte), 0, gfn,
+			 is_dirty_gpte(gpte), 0, gfn,
 			 spte_to_pfn(sp->spt[i]), true, false);
 	}
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 05cbe83c74e2..e877efa37620 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -237,7 +237,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 		goto out;
 	}
 	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
-		if (is_present_pte(pdpte[i]) &&
+		if (is_present_gpte(pdpte[i]) &&
 		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
 			ret = 0;
 			goto out;
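
The x86.c hunk applies the same rename where PAE PDPTEs are validated at load time: an entry is rejected only when it is both present and has reserved bits set. As a closing illustration, here is a self-contained sketch of that check; rsvd_mask is a hypothetical stand-in for vcpu->arch.mmu.rsvd_bits_mask[0][2], and none of this is the kernel implementation.

/*
 * Sketch of the PDPTE validation pattern from the load_pdptrs() hunk.
 * rsvd_mask is hypothetical; the kernel computes it per MMU mode.
 */
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK (1ULL << 0)    /* bit 0: present */

static int is_present_gpte(uint64_t pte)
{
        return (pte & PT_PRESENT_MASK) != 0;
}

/* Returns 1 if all four PDPTEs pass; 0 if any present entry sets reserved bits. */
static int pdptes_valid(const uint64_t pdpte[4], uint64_t rsvd_mask)
{
        int i;

        for (i = 0; i < 4; i++)
                if (is_present_gpte(pdpte[i]) && (pdpte[i] & rsvd_mask))
                        return 0;
        return 1;
}

int main(void)
{
        /* Hypothetical reserved mask: treat bits 52-63 as reserved. */
        uint64_t rsvd_mask = ~((1ULL << 52) - 1);
        uint64_t ok[4]  = { PT_PRESENT_MASK, 0, 0, 0 };
        uint64_t bad[4] = { PT_PRESENT_MASK | (1ULL << 60), 0, 0, 0 };

        printf("ok:  %d\n", pdptes_valid(ok, rsvd_mask));   /* prints 1 */
        printf("bad: %d\n", pdptes_valid(bad, rsvd_mask));  /* prints 0 */
        return 0;
}

Note the ordering of the test: a non-present PDPTE is ignored regardless of its other bits, which is why the present check must come first.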