about summary refs log tree commit diff stats
path: root/arch/x86/kvm/paging_tmpl.h
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2009-06-10 07:12:05 -0400
committerAvi Kivity <avi@redhat.com>2009-09-10 01:32:51 -0400
commit43a3795a3a12425de31e25ce0ebc3bb41501cef7 (patch)
tree93481c7731b8c81ad7066d0f1e2202e767d3bd35 /arch/x86/kvm/paging_tmpl.h
parent439e218a6f4716da484314fc5a1f0a59b0212c01 (diff)
KVM: MMU: Adjust pte accessors to explicitly indicate guest or shadow pte
Since the guest and host ptes can have wildly different format, adjust the pte accessor names to indicate on which type of pte they operate on. No functional changes.

Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--arch/x86/kvm/paging_tmpl.h22
1 files changed, 11 insertions, 11 deletions
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 4cb1dbfd7c2a..238a193bbf5b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -132,7 +132,7 @@ walk:
 #if PTTYPE == 64
 	if (!is_long_mode(vcpu)) {
 		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
-		if (!is_present_pte(pte))
+		if (!is_present_gpte(pte))
 			goto not_present;
 		--walker->level;
 	}
@@ -155,7 +155,7 @@ walk:

 		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

-		if (!is_present_pte(pte))
+		if (!is_present_gpte(pte))
 			goto not_present;

 		rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
@@ -205,7 +205,7 @@ walk:
 		--walker->level;
 	}

-	if (write_fault && !is_dirty_pte(pte)) {
+	if (write_fault && !is_dirty_gpte(pte)) {
 		bool ret;

 		mark_page_dirty(vcpu->kvm, table_gfn);
@@ -252,7 +252,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,

 	gpte = *(const pt_element_t *)pte;
 	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
-		if (!is_present_pte(gpte))
+		if (!is_present_gpte(gpte))
 			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
 		return;
 	}
@@ -289,7 +289,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	pt_element_t curr_pte;
 	struct kvm_shadow_walk_iterator iterator;

-	if (!is_present_pte(gw->ptes[gw->level - 1]))
+	if (!is_present_gpte(gw->ptes[gw->level - 1]))
 		return NULL;

 	for_each_shadow_entry(vcpu, addr, iterator) {
@@ -318,7 +318,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		if (level == PT_DIRECTORY_LEVEL
 		    && gw->level == PT_DIRECTORY_LEVEL) {
 			direct = 1;
-			if (!is_dirty_pte(gw->ptes[level - 1]))
+			if (!is_dirty_gpte(gw->ptes[level - 1]))
 				access &= ~ACC_WRITE_MASK;
 			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
 		} else {
@@ -489,7 +489,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
 				  sizeof(pt_element_t)))
 		return;
-	if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
+	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
 		if (mmu_topup_memory_caches(vcpu))
 			return;
 		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
@@ -536,7 +536,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
 		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
 		for (j = 0; j < ARRAY_SIZE(pt); ++j)
-			if (r || is_present_pte(pt[j]))
+			if (r || is_present_gpte(pt[j]))
 				sp->spt[i+j] = shadow_trap_nonpresent_pte;
 			else
 				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
@@ -574,12 +574,12 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 				  sizeof(pt_element_t)))
 			return -EINVAL;

-		if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
+		if (gpte_to_gfn(gpte) != gfn || !is_present_gpte(gpte) ||
 		    !(gpte & PT_ACCESSED_MASK)) {
 			u64 nonpresent;

 			rmap_remove(vcpu->kvm, &sp->spt[i]);
-			if (is_present_pte(gpte))
+			if (is_present_gpte(gpte))
 				nonpresent = shadow_trap_nonpresent_pte;
 			else
 				nonpresent = shadow_notrap_nonpresent_pte;
@@ -590,7 +590,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 			nr_present++;
 			pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
 			set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
-				 is_dirty_pte(gpte), 0, gfn,
+				 is_dirty_gpte(gpte), 0, gfn,
 				 spte_to_pfn(sp->spt[i]), true, false);
 		}
