path: root/drivers/kvm/paging_tmpl.h
author	Avi Kivity <avi@qumranet.com>	2007-03-23 03:55:25 -0400
committer	Avi Kivity <avi@qumranet.com>	2007-05-03 03:52:27 -0400
commit	d28c6cfbbc5e2d4fccfe6d733995ed5971ca87f6 (patch)
tree	0da31407f97a92c81d189b18608e54ad2064ebad /drivers/kvm/paging_tmpl.h
parent	916ce2360fadc71d924e02403b31280112a31280 (diff)
KVM: MMU: Fix hugepage pdes mapping same physical address with different access
The kvm mmu keeps a shadow page for hugepage pdes; if several such pdes map
the same physical address, they share the same shadow page.  This is a fairly
common case (kernel mappings on i386 nonpae Linux, for example).

However, if the two pdes map the same memory but with different permissions,
kvm will happily use the cached shadow page.  If the access through the more
permissive pde occurs after the access through the stricter pde, an endless
pagefault loop is generated and the guest makes no progress.

Fix by making the access permissions part of the cache lookup key.

The fix allows Xen pae to boot on kvm and run guest domains.

Thanks to Jeremy Fitzhardinge for reporting the bug and testing the fix.

Signed-off-by: Avi Kivity <avi@qumranet.com>
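[Editor's note] To make the lookup-key idea concrete, here is a minimal,
self-contained C sketch; it is not the kernel code, and shadow_cache_key()
and its key layout are hypothetical illustrations.  It shows why folding the
guest pde's user/write bits into the key keeps two pdes that map the same
frame with different permissions from resolving to the same cached entry:

	#include <stdint.h>
	#include <stdio.h>

	/* Standard x86 pde/pte permission bits. */
	#define PT_WRITABLE_SHIFT 1
	#define PT_WRITABLE_MASK  (1ULL << PT_WRITABLE_SHIFT)	/* bit 1: R/W */
	#define PT_USER_MASK      (1ULL << 2)			/* bit 2: U/S */

	/*
	 * Hypothetical cache key.  Before the fix the key was in effect
	 * just the guest frame number, so two hugepage pdes mapping the
	 * same frame collided even when their permissions differed.
	 */
	static uint64_t shadow_cache_key(uint64_t guest_pde)
	{
		uint64_t gfn = guest_pde >> 12;	/* guest frame number */

		/* Pack U/S and R/W into bits 1:0, as the patch does. */
		uint64_t access = (guest_pde & (PT_USER_MASK | PT_WRITABLE_MASK))
					>> PT_WRITABLE_SHIFT;

		/* Key on (gfn, access): differing permissions now miss. */
		return (gfn << 2) | access;
	}

	int main(void)
	{
		/* Same 2MB frame, one read-only pde and one writable pde. */
		uint64_t ro_pde = 0x200000ULL | PT_USER_MASK;
		uint64_t rw_pde = 0x200000ULL | PT_USER_MASK | PT_WRITABLE_MASK;

		/* The keys differ, so each pde gets its own shadow page. */
		printf("ro key=%llx rw key=%llx\n",
		       (unsigned long long)shadow_cache_key(ro_pde),
		       (unsigned long long)shadow_cache_key(rw_pde));
		return 0;
	}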
Diffstat (limited to 'drivers/kvm/paging_tmpl.h')
 drivers/kvm/paging_tmpl.h | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 17bd4400c92b..b94010dad809 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -247,6 +247,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		u64 shadow_pte;
 		int metaphysical;
 		gfn_t table_gfn;
+		unsigned hugepage_access = 0;
 
 		if (is_present_pte(*shadow_ent) || is_io_pte(*shadow_ent)) {
 			if (level == PT_PAGE_TABLE_LEVEL)
@@ -276,6 +277,9 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		if (level - 1 == PT_PAGE_TABLE_LEVEL
 		    && walker->level == PT_DIRECTORY_LEVEL) {
 			metaphysical = 1;
+			hugepage_access = *guest_ent;
+			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
+			hugepage_access >>= PT_WRITABLE_SHIFT;
 			table_gfn = (*guest_ent & PT_BASE_ADDR_MASK)
 				>> PAGE_SHIFT;
 		} else {
@@ -283,7 +287,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			table_gfn = walker->table_gfn[level - 2];
 		}
 		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
-					       metaphysical, shadow_ent);
+					       metaphysical, hugepage_access,
+					       shadow_ent);
 		shadow_addr = shadow_page->page_hpa;
 		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
 			| PT_WRITABLE_MASK | PT_USER_MASK;
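[Editor's note] On the added arithmetic: on x86, PT_WRITABLE_MASK is bit 1
(R/W) and PT_USER_MASK is bit 2 (U/S) of a pde, and PT_WRITABLE_SHIFT is 1,
so masking the guest pde with those two bits and shifting right by
PT_WRITABLE_SHIFT packs them into a two-bit hugepage_access value (writable
in bit 0, user in bit 1).  That value is what the extra kvm_mmu_get_page()
argument carries into the shadow-page lookup, so pdes mapping the same frame
with different user/write permissions no longer hit the same cached shadow
page.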