author     Avi Kivity <avi@qumranet.com>   2008-08-22 12:11:39 -0400
committer  Avi Kivity <avi@qumranet.com>   2008-10-15 04:15:23 -0400
commit     abb9e0b8e33e58ac8e04e03b680c46435bc312fd (patch)
tree       e302013c6f3330c3fd62389138bda5c040b453a7
parent     140754bc80e1cdbf2d14cdb10d900da1f7718e7b (diff)
KVM: MMU: Convert the paging mode shadow walk to use the generic walker
Signed-off-by: Avi Kivity <avi@qumranet.com>
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  158
1 file changed, 86 insertions(+), 72 deletions(-)
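The patch converts FNAME(fetch)'s hand-rolled descent through the shadow page tables into a callback driven by the generic walker: the caller embeds a struct kvm_shadow_walk inside its own struct shadow_walker, points .entry at FNAME(shadow_walk_entry), and hands the embedded member to walk_shadow(); the callback recovers its enclosing context with container_of() and returns nonzero to stop the walk (after installing the leaf spte or detecting a stale guest pte) or zero to continue to the next level. The fragment below is a minimal, self-contained sketch of that embed-and-callback idiom only; every name in it except container_of() is an illustrative stand-in, and the real kvm_shadow_walk/walk_shadow interface in KVM's MMU code takes more arguments than shown here.

/*
 * Minimal sketch of the embed-and-callback idiom used by the patch
 * (illustrative stand-in names only; this is not the KVM interface).
 * A generic walk descriptor carries just the per-level callback; the
 * caller embeds it in a larger context struct and recovers that
 * context with container_of().
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct walk_desc {
        /* called once per level; nonzero return stops the walk */
        int (*entry)(struct walk_desc *desc, int level);
};

struct fetch_walker {
        struct walk_desc walker;        /* must be embedded by value */
        int levels_visited;
        int stop_level;
};

/* simplified stand-in for the generic walk loop: root level down to 1 */
static void walk_levels(struct walk_desc *desc, int root_level)
{
        for (int level = root_level; level >= 1; level--)
                if (desc->entry(desc, level))
                        break;
}

static int fetch_entry(struct walk_desc *desc, int level)
{
        struct fetch_walker *w =
                container_of(desc, struct fetch_walker, walker);

        w->levels_visited++;
        return level == w->stop_level;  /* stop once the leaf is reached */
}

int main(void)
{
        struct fetch_walker w = {
                .walker = { .entry = fetch_entry },
                .stop_level = 1,
        };

        walk_levels(&w.walker, 4);      /* e.g. a 4-level shadow hierarchy */
        printf("visited %d levels\n", w.levels_visited);
        return 0;
}

This mirrors how FNAME(fetch) now only seeds the shadow_walker, calls walk_shadow(), and reads back walker.sptep afterwards, while all per-level work lives in FNAME(shadow_walk_entry).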
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ebb26a09d311..b7064e1e1e17 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -25,6 +25,7 @@
 #if PTTYPE == 64
         #define pt_element_t u64
         #define guest_walker guest_walker64
+        #define shadow_walker shadow_walker64
         #define FNAME(name) paging##64_##name
         #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
         #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
@@ -41,6 +42,7 @@
 #elif PTTYPE == 32
         #define pt_element_t u32
         #define guest_walker guest_walker32
+        #define shadow_walker shadow_walker32
         #define FNAME(name) paging##32_##name
         #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
         #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
@@ -71,6 +73,17 @@ struct guest_walker {
         u32 error_code;
 };
 
+struct shadow_walker {
+        struct kvm_shadow_walk walker;
+        struct guest_walker *guest_walker;
+        int user_fault;
+        int write_fault;
+        int largepage;
+        int *ptwrite;
+        pfn_t pfn;
+        u64 *sptep;
+};
+
 static gfn_t gpte_to_gfn(pt_element_t gpte)
 {
         return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -272,86 +285,86 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
-static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
-                         struct guest_walker *walker,
-                         int user_fault, int write_fault, int largepage,
-                         int *ptwrite, pfn_t pfn)
+static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
+                                    struct kvm_vcpu *vcpu, gva_t addr,
+                                    u64 *sptep, int level)
 {
-        hpa_t shadow_addr;
-        int level;
-        u64 *shadow_ent;
-        unsigned access = walker->pt_access;
-
-        if (!is_present_pte(walker->ptes[walker->level - 1]))
-                return NULL;
-
-        shadow_addr = vcpu->arch.mmu.root_hpa;
-        level = vcpu->arch.mmu.shadow_root_level;
-        if (level == PT32E_ROOT_LEVEL) {
-                shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
-                shadow_addr &= PT64_BASE_ADDR_MASK;
-                --level;
+        struct shadow_walker *sw =
+                container_of(_sw, struct shadow_walker, walker);
+        struct guest_walker *gw = sw->guest_walker;
+        unsigned access = gw->pt_access;
+        struct kvm_mmu_page *shadow_page;
+        u64 spte;
+        int metaphysical;
+        gfn_t table_gfn;
+        int r;
+        pt_element_t curr_pte;
+
+        if (level == PT_PAGE_TABLE_LEVEL
+            || (sw->largepage && level == PT_DIRECTORY_LEVEL)) {
+                mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
+                             sw->user_fault, sw->write_fault,
+                             gw->ptes[gw->level-1] & PT_DIRTY_MASK,
+                             sw->ptwrite, sw->largepage, gw->gfn, sw->pfn,
+                             false);
+                sw->sptep = sptep;
+                return 1;
         }
 
-        for (; ; level--) {
-                u32 index = SHADOW_PT_INDEX(addr, level);
-                struct kvm_mmu_page *shadow_page;
-                u64 shadow_pte;
-                int metaphysical;
-                gfn_t table_gfn;
-
-                shadow_ent = ((u64 *)__va(shadow_addr)) + index;
-                if (level == PT_PAGE_TABLE_LEVEL)
-                        break;
-
-                if (largepage && level == PT_DIRECTORY_LEVEL)
-                        break;
-
-                if (is_shadow_present_pte(*shadow_ent)
-                    && !is_large_pte(*shadow_ent)) {
-                        shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
-                        continue;
-                }
+        if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+                return 0;
 
-                if (is_large_pte(*shadow_ent))
-                        rmap_remove(vcpu->kvm, shadow_ent);
+        if (is_large_pte(*sptep))
+                rmap_remove(vcpu->kvm, sptep);
 
-                if (level - 1 == PT_PAGE_TABLE_LEVEL
-                    && walker->level == PT_DIRECTORY_LEVEL) {
-                        metaphysical = 1;
-                        if (!is_dirty_pte(walker->ptes[level - 1]))
-                                access &= ~ACC_WRITE_MASK;
-                        table_gfn = gpte_to_gfn(walker->ptes[level - 1]);
-                } else {
-                        metaphysical = 0;
-                        table_gfn = walker->table_gfn[level - 2];
-                }
-                shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
-                                               metaphysical, access,
-                                               shadow_ent);
-                if (!metaphysical) {
-                        int r;
-                        pt_element_t curr_pte;
-                        r = kvm_read_guest_atomic(vcpu->kvm,
-                                                  walker->pte_gpa[level - 2],
-                                                  &curr_pte, sizeof(curr_pte));
-                        if (r || curr_pte != walker->ptes[level - 2]) {
-                                kvm_release_pfn_clean(pfn);
-                                return NULL;
-                        }
+        if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) {
+                metaphysical = 1;
+                if (!is_dirty_pte(gw->ptes[level - 1]))
+                        access &= ~ACC_WRITE_MASK;
+                table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
+        } else {
+                metaphysical = 0;
+                table_gfn = gw->table_gfn[level - 2];
+        }
+        shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+                                       metaphysical, access, sptep);
+        if (!metaphysical) {
+                r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
+                                          &curr_pte, sizeof(curr_pte));
+                if (r || curr_pte != gw->ptes[level - 2]) {
+                        kvm_release_pfn_clean(sw->pfn);
+                        sw->sptep = NULL;
+                        return 1;
+                }
                 }
-                shadow_addr = __pa(shadow_page->spt);
-                shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
-                        | PT_WRITABLE_MASK | PT_USER_MASK;
-                set_shadow_pte(shadow_ent, shadow_pte);
         }
 
-        mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
-                     user_fault, write_fault,
-                     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
-                     ptwrite, largepage, walker->gfn, pfn, false);
+        spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK
+                | PT_WRITABLE_MASK | PT_USER_MASK;
+        *sptep = spte;
+        return 0;
+}
+
+static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+                         struct guest_walker *guest_walker,
+                         int user_fault, int write_fault, int largepage,
+                         int *ptwrite, pfn_t pfn)
+{
+        struct shadow_walker walker = {
+                .walker = { .entry = FNAME(shadow_walk_entry), },
+                .guest_walker = guest_walker,
+                .user_fault = user_fault,
+                .write_fault = write_fault,
+                .largepage = largepage,
+                .ptwrite = ptwrite,
+                .pfn = pfn,
+        };
+
+        if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1]))
+                return NULL;
+
+        walk_shadow(&walker.walker, vcpu, addr);
 
-        return shadow_ent;
+        return walker.sptep;
 }
 
 /*
@@ -499,6 +512,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 
 #undef pt_element_t
 #undef guest_walker
+#undef shadow_walker
 #undef FNAME
 #undef PT_BASE_ADDR_MASK
 #undef PT_INDEX