path: root/arch/x86/kvm/mmu.c
author     Avi Kivity <avi@redhat.com>  2010-06-21 04:44:20 -0400
committer  Avi Kivity <avi@redhat.com>  2010-08-01 03:47:00 -0400
commit     a1f4d39500ad8ed61825eff061debff42386ab5b
tree       58b72188b1356329e78951773f4be41e66b11d21
parent     fc34531db3cf8c422f2ff7cf4ef507a3ca672cd2
KVM: Remove memory alias support
As advertised in feature-removal-schedule.txt. Equivalent support is
provided by overlapping memory regions.

Signed-off-by: Avi Kivity <avi@redhat.com>
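For context: the removed alias mechanism let a guest-physical range stand in for another; with it gone, userspace gets the same effect by registering more than one memory slot backed by the same host memory. Below is a minimal userspace sketch of that replacement pattern using the standard KVM_SET_USER_MEMORY_REGION ioctl; the guest-physical addresses, sizes, and slot numbers are hypothetical, chosen only for illustration.

/*
 * Sketch only: map the same host backing at two guest-physical ranges,
 * reproducing alias-style behaviour without the removed alias ioctl.
 * Addresses and slot numbers are hypothetical.
 */
#include <linux/kvm.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int map_alias_style_regions(int vm_fd)
{
	size_t size = 0x10000;	/* 64 KiB of shared backing */
	void *backing = mmap(NULL, size, PROT_READ | PROT_WRITE,
			     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (backing == MAP_FAILED)
		return -1;

	struct kvm_userspace_memory_region primary = {
		.slot            = 0,
		.guest_phys_addr = 0xc0000000,	/* hypothetical range */
		.memory_size     = size,
		.userspace_addr  = (__u64)(unsigned long)backing,
	};
	struct kvm_userspace_memory_region alias = {
		.slot            = 1,
		.guest_phys_addr = 0xa0000,	/* hypothetical range */
		.memory_size     = size,
		/* same host memory as the primary slot */
		.userspace_addr  = (__u64)(unsigned long)backing,
	};

	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &primary) < 0)
		return -1;
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &alias);
}

Writes through either guest-physical range land in the same host pages, which is the property the alias support provided.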
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  17
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8c2f580956d..c5501bc1010 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -434,9 +434,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 	int *write_count;
 	int i;
 
-	gfn = unalias_gfn(kvm, gfn);
-
-	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 		write_count = slot_largepage_idx(gfn, slot, i);
@@ -450,8 +448,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 	int *write_count;
 	int i;
 
-	gfn = unalias_gfn(kvm, gfn);
-	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 		write_count = slot_largepage_idx(gfn, slot, i);
@@ -467,8 +464,7 @@ static int has_wrprotected_page(struct kvm *kvm,
 	struct kvm_memory_slot *slot;
 	int *largepage_idx;
 
-	gfn = unalias_gfn(kvm, gfn);
-	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	slot = gfn_to_memslot(kvm, gfn);
 	if (slot) {
 		largepage_idx = slot_largepage_idx(gfn, slot, level);
 		return *largepage_idx;
@@ -521,7 +517,6 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
 
 /*
  * Take gfn and return the reverse mapping to it.
- * Note: gfn must be unaliased before this function get called
  */
 
 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
@@ -561,7 +556,6 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	if (!is_rmap_spte(*spte))
 		return count;
-	gfn = unalias_gfn(vcpu->kvm, gfn);
 	sp = page_header(__pa(spte));
 	kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
@@ -698,7 +692,6 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 	u64 *spte;
 	int i, write_protected = 0;
 
-	gfn = unalias_gfn(kvm, gfn);
 	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
 
 	spte = rmap_next(kvm, rmapp, NULL);
@@ -885,7 +878,6 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
 	sp = page_header(__pa(spte));
 
-	gfn = unalias_gfn(vcpu->kvm, gfn);
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 
 	kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
@@ -3510,8 +3502,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 		if (sp->unsync)
 			continue;
 
-		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
-		slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
+		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
 		rmapp = &slot->rmap[gfn - slot->base_gfn];
 
 		spte = rmap_next(vcpu->kvm, rmapp, NULL);
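With the alias layer gone, every caller above resolves a gfn straight to its memslot. As a rough sketch of what that lookup amounts to (simplified, not the exact kernel source; kernels of this era dereference kvm->memslots under RCU), gfn_to_memslot() is a linear scan of the registered slots:

/*
 * Illustrative sketch of the slot lookup gfn_to_memslot() now performs
 * directly for these callers: scan the registered slots and return the
 * one whose guest-physical range contains gfn. Simplified; the real
 * function takes the memslots pointer under RCU protection.
 */
static struct kvm_memory_slot *gfn_to_memslot_sketch(struct kvm *kvm,
						     gfn_t gfn)
{
	struct kvm_memslots *slots = kvm->memslots;
	int i;

	for (i = 0; i < slots->nmemslots; ++i) {
		struct kvm_memory_slot *slot = &slots->memslots[i];

		if (gfn >= slot->base_gfn &&
		    gfn < slot->base_gfn + slot->npages)
			return slot;
	}
	return NULL;
}

This is why the two-step unalias_gfn() plus gfn_to_memslot_unaliased() sequence in each hunk collapses to a single call: there is no alias table left to consult before the scan.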