aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/mmu.c
diff options
context:
space:
mode:
authorMarcelo Tosatti <mtosatti@redhat.com>2013-02-20 16:52:02 -0500
committerMarcelo Tosatti <mtosatti@redhat.com>2013-02-20 16:52:02 -0500
commit6b73a96065e89dc9fa75ba4f78b1aa3a3bbd0470 (patch)
tree170a39f429c7b98ee25ddc6436139a57bb5dd6f9 /arch/x86/kvm/mmu.c
parented55705dd5008b408c48a8459b8b34b01f3de985 (diff)
Revert "KVM: MMU: lazily drop large spte"
This reverts commit caf6900f2d8aaebe404c976753f6813ccd31d95e. It is causing migration failures, reference https://bugzilla.kernel.org/show_bug.cgi?id=54061. Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--arch/x86/kvm/mmu.c23
1 files changed, 16 insertions, 7 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1cda1f332654..4ed3edbe06bd 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1105,7 +1105,8 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1105 1105
1106/* 1106/*
1107 * Write-protect on the specified @sptep, @pt_protect indicates whether 1107 * Write-protect on the specified @sptep, @pt_protect indicates whether
1108 * spte write-protection is caused by protecting shadow page table. 1108 * spte writ-protection is caused by protecting shadow page table.
1109 * @flush indicates whether tlb need be flushed.
1109 * 1110 *
1110 * Note: write protection is different between dirty logging and spte 1111 * Note: write protection is different between dirty logging and spte
1111 * protection: 1112 * protection:
@@ -1114,9 +1115,10 @@ static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1114 * - for spte protection, the spte can be writable only after unsync-ing 1115 * - for spte protection, the spte can be writable only after unsync-ing
1115 * shadow page. 1116 * shadow page.
1116 * 1117 *
1117 * Return true if tlb need be flushed. 1118 * Return true if the spte is dropped.
1118 */ 1119 */
1119static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect) 1120static bool
1121spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect)
1120{ 1122{
1121 u64 spte = *sptep; 1123 u64 spte = *sptep;
1122 1124
@@ -1126,11 +1128,17 @@ static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool pt_protect)
1126 1128
1127 rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep); 1129 rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
1128 1130
1131 if (__drop_large_spte(kvm, sptep)) {
1132 *flush |= true;
1133 return true;
1134 }
1135
1129 if (pt_protect) 1136 if (pt_protect)
1130 spte &= ~SPTE_MMU_WRITEABLE; 1137 spte &= ~SPTE_MMU_WRITEABLE;
1131 spte = spte & ~PT_WRITABLE_MASK; 1138 spte = spte & ~PT_WRITABLE_MASK;
1132 1139
1133 return mmu_spte_update(sptep, spte); 1140 *flush |= mmu_spte_update(sptep, spte);
1141 return false;
1134} 1142}
1135 1143
1136static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, 1144static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
@@ -1142,8 +1150,11 @@ static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
1142 1150
1143 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) { 1151 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
1144 BUG_ON(!(*sptep & PT_PRESENT_MASK)); 1152 BUG_ON(!(*sptep & PT_PRESENT_MASK));
1153 if (spte_write_protect(kvm, sptep, &flush, pt_protect)) {
1154 sptep = rmap_get_first(*rmapp, &iter);
1155 continue;
1156 }
1145 1157
1146 flush |= spte_write_protect(kvm, sptep, pt_protect);
1147 sptep = rmap_get_next(&iter); 1158 sptep = rmap_get_next(&iter);
1148 } 1159 }
1149 1160
@@ -2581,8 +2592,6 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
2581 break; 2592 break;
2582 } 2593 }
2583 2594
2584 drop_large_spte(vcpu, iterator.sptep);
2585
2586 if (!is_shadow_present_pte(*iterator.sptep)) { 2595 if (!is_shadow_present_pte(*iterator.sptep)) {
2587 u64 base_addr = iterator.addr; 2596 u64 base_addr = iterator.addr;
2588 2597