author     Avi Kivity <avi@qumranet.com>    2007-12-09 11:43:00 -0500
committer  Avi Kivity <avi@qumranet.com>    2008-01-30 10:53:21 -0500
commit     e833240f3c1b0b415efb14eaa102718769d5f063 (patch)
tree       a2482ca91665ce0e761131d971bb418c5e8e365c /drivers/kvm
parent     bc750ba860d978fcaac1e0db28774b1f38ae8193 (diff)
KVM: MMU: Use mmu_set_spte() for real-mode shadows
In addition to removing some duplicated code, this also handles the unlikely case of real-mode code updating a guest page table. This can happen when one vcpu (in real mode) touches a second vcpu's (in protected mode) page tables, or if a vcpu switches to real mode, touches page tables, and switches back.

Signed-off-by: Avi Kivity <avi@qumranet.com>
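For context, a hedged reading of the new leaf-level path (taken from the first hunk below; the behaviour of mmu_set_spte(), introduced earlier in this series, is assumed rather than shown here): the open-coded rmap/dirty-page bookkeeping is replaced by a single call, and a nonzero return value now asks the caller to emulate the faulting instruction, either because the pte maps MMIO or because the write hit a shadowed guest page table.

	if (level == 1) {
		/* mmu_set_spte() now does the rmap, dirty-page and slot
		 * bookkeeping that nonpaging_map() used to open-code;
		 * pt_write is set when the write touched a shadowed guest
		 * page table and therefore needs emulation.
		 */
		mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
			     0, write, 1, &pt_write, gfn);
		return pt_write || is_io_pte(table[index]);
	}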
Diffstat (limited to 'drivers/kvm')
-rw-r--r--	drivers/kvm/mmu.c	41
1 file changed, 10 insertions(+), 31 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index b4dd72645853..ba71e8d66761 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -966,40 +966,23 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, struct page *page)
+static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 {
 	int level = PT32E_ROOT_LEVEL;
 	hpa_t table_addr = vcpu->mmu.root_hpa;
+	int pt_write = 0;
 
 	for (; ; level--) {
 		u32 index = PT64_INDEX(v, level);
 		u64 *table;
-		u64 pte;
 
 		ASSERT(VALID_PAGE(table_addr));
 		table = __va(table_addr);
 
 		if (level == 1) {
-			int was_rmapped;
-
-			pte = table[index];
-			was_rmapped = is_rmap_pte(pte);
-			if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
-				kvm_release_page_clean(page);
-				return 0;
-			}
-			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
-			page_header_update_slot(vcpu->kvm, table,
-						v >> PAGE_SHIFT);
-			table[index] = page_to_phys(page)
-				| PT_PRESENT_MASK | PT_WRITABLE_MASK
-				| PT_USER_MASK;
-			if (!was_rmapped)
-				rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
-			else
-				kvm_release_page_clean(page);
-
-			return 0;
+			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
+				     0, write, 1, &pt_write, gfn);
+			return pt_write || is_io_pte(table[index]);
 		}
 
 		if (table[index] == shadow_trap_nonpresent_pte) {
@@ -1013,7 +996,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, struct page *page)
 						1, ACC_ALL, &table[index]);
 			if (!new_table) {
 				pgprintk("nonpaging_map: ENOMEM\n");
-				kvm_release_page_clean(page);
 				return -ENOMEM;
 			}
 
@@ -1114,9 +1096,10 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 				u32 error_code)
 {
-	struct page *page;
+	gfn_t gfn;
 	int r;
 
+	pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		return r;
@@ -1124,14 +1107,10 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
 
-	page = gfn_to_page(vcpu->kvm, gva >> PAGE_SHIFT);
-
-	if (is_error_page(page)) {
-		kvm_release_page_clean(page);
-		return 1;
-	}
+	gfn = gva >> PAGE_SHIFT;
 
-	return nonpaging_map(vcpu, gva & PAGE_MASK, page);
+	return nonpaging_map(vcpu, gva & PAGE_MASK,
+			     error_code & PFERR_WRITE_MASK, gfn);
 }
 
 static void nonpaging_free(struct kvm_vcpu *vcpu)