path: root/arch/x86/kvm/paging_tmpl.h
author    Russell King <rmk@dyn-67.arm.linux.org.uk>    2009-03-28 16:29:51 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>    2009-03-28 16:29:51 -0400
commit    ed40d0c472b136682b2fcba05f89762859c7374f (patch)
tree      076b83a26bcd63d6158463735dd34c10bbc591dc  /arch/x86/kvm/paging_tmpl.h
parent    9e495834e59ca9b29f1a1f63b9f5533bb022ac49 (diff)
parent    5d80f8e5a9dc9c9a94d4aeaa567e219a808b8a4a (diff)
Merge branch 'origin' into devel

Conflicts:
	sound/soc/pxa/pxa2xx-i2s.c
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 219
1 file changed, 97 insertions(+), 122 deletions(-)
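On the KVM side, the change to paging_tmpl.h picked up by this merge drops the callback-based shadow walk: the struct shadow_walker / walk_shadow() entry points are removed, and fetch() and invlpg() now open-code the walk with for_each_shadow_entry() and a kvm_shadow_walk_iterator, keeping their state in local variables (the page role field is also renamed from metaphysical to direct). The toy program below only illustrates that callback-to-iterator shape in plain C; every name in it (toy_mmu, sw_iter, sw_begin, sw_ok, sw_next, for_each_toy_entry) is invented for the example and is not the kernel's implementation, which lives in arch/x86/kvm/mmu.c.

/*
 * Toy model of the callback-to-iterator conversion shown in the diff below.
 * All names here are invented for illustration; this is not KVM code.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_LEVELS 4

struct toy_mmu {
	uint64_t spte[TOY_LEVELS + 1];	/* one fake entry per paging level */
};

/* Iterator state: current level and a pointer to that level's entry. */
struct sw_iter {
	int level;
	uint64_t *sptep;
};

static void sw_begin(struct sw_iter *it, struct toy_mmu *mmu)
{
	it->level = TOY_LEVELS;
	it->sptep = &mmu->spte[TOY_LEVELS];
}

static int sw_ok(const struct sw_iter *it)
{
	return it->level >= 1;
}

static void sw_next(struct sw_iter *it, struct toy_mmu *mmu)
{
	it->level--;
	it->sptep = &mmu->spte[it->level];
}

/* Analogue of for_each_shadow_entry(): walk from the root downwards. */
#define for_each_toy_entry(it, mmu) \
	for (sw_begin(&(it), mmu); sw_ok(&(it)); sw_next(&(it), mmu))

int main(void)
{
	struct toy_mmu mmu = { .spte = { 0, 1, 2, 3, 4 } };
	struct sw_iter it;

	/*
	 * The caller keeps its state in locals and can break out early,
	 * which is what the new fetch()/invlpg() code does; the old
	 * callback style had to smuggle the same state through
	 * struct shadow_walker and return 0/1 to continue or stop.
	 */
	for_each_toy_entry(it, &mmu) {
		printf("level %d: spte=%llu\n", it.level,
		       (unsigned long long)*it.sptep);
		if (it.level == 1)
			break;	/* leaf reached */
	}
	return 0;
}

The benefit of the iterator shape is visible in the loop body: early exits and return values are expressed directly in the caller instead of being routed through struct fields and callback return codes.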
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 9fd78b6e17ad..6bd70206c561 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -25,7 +25,6 @@
 #if PTTYPE == 64
 #define pt_element_t u64
 #define guest_walker guest_walker64
-#define shadow_walker shadow_walker64
 #define FNAME(name) paging##64_##name
 #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
 #define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
@@ -42,7 +41,6 @@
 #elif PTTYPE == 32
 #define pt_element_t u32
 #define guest_walker guest_walker32
-#define shadow_walker shadow_walker32
 #define FNAME(name) paging##32_##name
 #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
 #define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
@@ -73,18 +71,6 @@ struct guest_walker {
 	u32 error_code;
 };
 
-struct shadow_walker {
-	struct kvm_shadow_walk walker;
-	struct guest_walker *guest_walker;
-	int user_fault;
-	int write_fault;
-	int largepage;
-	int *ptwrite;
-	pfn_t pfn;
-	u64 *sptep;
-	gpa_t pte_gpa;
-};
-
 static gfn_t gpte_to_gfn(pt_element_t gpte)
 {
 	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
@@ -283,91 +269,79 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 /*
  * Fetch a shadow pte for a specific level in the paging hierarchy.
  */
-static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
-				    struct kvm_vcpu *vcpu, u64 addr,
-				    u64 *sptep, int level)
+static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
+			 struct guest_walker *gw,
+			 int user_fault, int write_fault, int largepage,
+			 int *ptwrite, pfn_t pfn)
 {
-	struct shadow_walker *sw =
-		container_of(_sw, struct shadow_walker, walker);
-	struct guest_walker *gw = sw->guest_walker;
 	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *shadow_page;
-	u64 spte;
-	int metaphysical;
+	u64 spte, *sptep;
+	int direct;
 	gfn_t table_gfn;
 	int r;
+	int level;
 	pt_element_t curr_pte;
+	struct kvm_shadow_walk_iterator iterator;
 
-	if (level == PT_PAGE_TABLE_LEVEL
-	    || (sw->largepage && level == PT_DIRECTORY_LEVEL)) {
-		mmu_set_spte(vcpu, sptep, access, gw->pte_access & access,
-			     sw->user_fault, sw->write_fault,
-			     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
-			     sw->ptwrite, sw->largepage,
-			     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
-			     gw->gfn, sw->pfn, false);
-		sw->sptep = sptep;
-		return 1;
-	}
+	if (!is_present_pte(gw->ptes[gw->level - 1]))
+		return NULL;
 
-	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
-		return 0;
+	for_each_shadow_entry(vcpu, addr, iterator) {
+		level = iterator.level;
+		sptep = iterator.sptep;
+		if (level == PT_PAGE_TABLE_LEVEL
+		    || (largepage && level == PT_DIRECTORY_LEVEL)) {
+			mmu_set_spte(vcpu, sptep, access,
+				     gw->pte_access & access,
+				     user_fault, write_fault,
+				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
+				     ptwrite, largepage,
+				     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
+				     gw->gfn, pfn, false);
+			break;
+		}
 
-	if (is_large_pte(*sptep)) {
-		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
-		kvm_flush_remote_tlbs(vcpu->kvm);
-		rmap_remove(vcpu->kvm, sptep);
-	}
+		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
+			continue;
 
-	if (level == PT_DIRECTORY_LEVEL && gw->level == PT_DIRECTORY_LEVEL) {
-		metaphysical = 1;
-		if (!is_dirty_pte(gw->ptes[level - 1]))
-			access &= ~ACC_WRITE_MASK;
-		table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
-	} else {
-		metaphysical = 0;
-		table_gfn = gw->table_gfn[level - 2];
-	}
-	shadow_page = kvm_mmu_get_page(vcpu, table_gfn, (gva_t)addr, level-1,
-				       metaphysical, access, sptep);
-	if (!metaphysical) {
-		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
-					  &curr_pte, sizeof(curr_pte));
-		if (r || curr_pte != gw->ptes[level - 2]) {
-			kvm_mmu_put_page(shadow_page, sptep);
-			kvm_release_pfn_clean(sw->pfn);
-			sw->sptep = NULL;
-			return 1;
+		if (is_large_pte(*sptep)) {
+			rmap_remove(vcpu->kvm, sptep);
+			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+			kvm_flush_remote_tlbs(vcpu->kvm);
 		}
-	}
 
-	spte = __pa(shadow_page->spt) | PT_PRESENT_MASK | PT_ACCESSED_MASK
-		| PT_WRITABLE_MASK | PT_USER_MASK;
-	*sptep = spte;
-	return 0;
-}
-
-static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
-			 struct guest_walker *guest_walker,
-			 int user_fault, int write_fault, int largepage,
-			 int *ptwrite, pfn_t pfn)
-{
-	struct shadow_walker walker = {
-		.walker = { .entry = FNAME(shadow_walk_entry), },
-		.guest_walker = guest_walker,
-		.user_fault = user_fault,
-		.write_fault = write_fault,
-		.largepage = largepage,
-		.ptwrite = ptwrite,
-		.pfn = pfn,
-	};
-
-	if (!is_present_pte(guest_walker->ptes[guest_walker->level - 1]))
-		return NULL;
+		if (level == PT_DIRECTORY_LEVEL
+		    && gw->level == PT_DIRECTORY_LEVEL) {
+			direct = 1;
+			if (!is_dirty_pte(gw->ptes[level - 1]))
+				access &= ~ACC_WRITE_MASK;
+			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
+		} else {
+			direct = 0;
+			table_gfn = gw->table_gfn[level - 2];
+		}
+		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
+					       direct, access, sptep);
+		if (!direct) {
+			r = kvm_read_guest_atomic(vcpu->kvm,
+						  gw->pte_gpa[level - 2],
+						  &curr_pte, sizeof(curr_pte));
+			if (r || curr_pte != gw->ptes[level - 2]) {
+				kvm_mmu_put_page(shadow_page, sptep);
+				kvm_release_pfn_clean(pfn);
+				sptep = NULL;
+				break;
+			}
+		}
 
-	walk_shadow(&walker.walker, vcpu, addr);
+		spte = __pa(shadow_page->spt)
+			| PT_PRESENT_MASK | PT_ACCESSED_MASK
+			| PT_WRITABLE_MASK | PT_USER_MASK;
+		*sptep = spte;
+	}
 
-	return walker.sptep;
+	return sptep;
 }
 
 /*
@@ -465,54 +439,56 @@ out_unlock:
 	return 0;
 }
 
-static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
-				      struct kvm_vcpu *vcpu, u64 addr,
-				      u64 *sptep, int level)
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
-	struct shadow_walker *sw =
-		container_of(_sw, struct shadow_walker, walker);
-
-	/* FIXME: properly handle invlpg on large guest pages */
-	if (level == PT_PAGE_TABLE_LEVEL ||
-	    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
-		struct kvm_mmu_page *sp = page_header(__pa(sptep));
+	struct kvm_shadow_walk_iterator iterator;
+	pt_element_t gpte;
+	gpa_t pte_gpa = -1;
+	int level;
+	u64 *sptep;
+	int need_flush = 0;
 
-		sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
-		sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+	spin_lock(&vcpu->kvm->mmu_lock);
 
-		if (is_shadow_present_pte(*sptep)) {
-			rmap_remove(vcpu->kvm, sptep);
-			if (is_large_pte(*sptep))
-				--vcpu->kvm->stat.lpages;
+	for_each_shadow_entry(vcpu, gva, iterator) {
+		level = iterator.level;
+		sptep = iterator.sptep;
+
+		/* FIXME: properly handle invlpg on large guest pages */
+		if (level == PT_PAGE_TABLE_LEVEL ||
+		    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
+			struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+			pte_gpa = (sp->gfn << PAGE_SHIFT);
+			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+
+			if (is_shadow_present_pte(*sptep)) {
+				rmap_remove(vcpu->kvm, sptep);
+				if (is_large_pte(*sptep))
+					--vcpu->kvm->stat.lpages;
+				need_flush = 1;
+			}
+			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+			break;
 		}
-		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
-		return 1;
-	}
-	if (!is_shadow_present_pte(*sptep))
-		return 1;
-	return 0;
-}
 
-static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
-{
-	pt_element_t gpte;
-	struct shadow_walker walker = {
-		.walker = { .entry = FNAME(shadow_invlpg_entry), },
-		.pte_gpa = -1,
-	};
+		if (!is_shadow_present_pte(*sptep))
+			break;
+	}
 
-	spin_lock(&vcpu->kvm->mmu_lock);
-	walk_shadow(&walker.walker, vcpu, gva);
+	if (need_flush)
+		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	if (walker.pte_gpa == -1)
+
+	if (pte_gpa == -1)
 		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte,
+	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
 				  sizeof(pt_element_t)))
 		return;
 	if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
 		if (mmu_topup_memory_caches(vcpu))
 			return;
-		kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte,
+		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
 				  sizeof(pt_element_t), 0);
 	}
 }
@@ -540,7 +516,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 	pt_element_t pt[256 / sizeof(pt_element_t)];
 	gpa_t pte_gpa;
 
-	if (sp->role.metaphysical
+	if (sp->role.direct
 	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
 		nonpaging_prefetch_page(vcpu, sp);
 		return;
@@ -619,7 +595,6 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 #undef pt_element_t
 #undef guest_walker
-#undef shadow_walker
 #undef FNAME
 #undef PT_BASE_ADDR_MASK
 #undef PT_INDEX