aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/kvm
diff options
context:
space:
mode:
authorUri Lublin <uril@qumranet.com>2007-02-21 11:25:21 -0500
committerAvi Kivity <avi@qumranet.com>2007-03-04 04:12:42 -0500
commitab51a434c5816e1ca3f033791c1cc5c6594998ec (patch)
treef806c9013d8d28cb585163eac7f6526c09965a30 /drivers/kvm
parentf7e6a45ad9224dfe9f0d76a45c43be7ccafe0b82 (diff)
KVM: Add missing calls to mark_page_dirty()
A few places where we modify guest memory fail to call mark_page_dirty(),
causing live migration to fail.  This adds the missing calls.

Signed-off-by: Uri Lublin <uril@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm')
-rw-r--r--drivers/kvm/kvm_main.c6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index a593d092d85b..edff4055b32b 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -228,12 +228,15 @@ int kvm_write_guest(struct kvm_vcpu *vcpu, gva_t addr, unsigned long size,
 		unsigned now;
 		unsigned offset;
 		hva_t guest_buf;
+		gfn_t gfn;

 		paddr = gva_to_hpa(vcpu, addr);

 		if (is_error_hpa(paddr))
 			break;

+		gfn = vcpu->mmu.gva_to_gpa(vcpu, addr) >> PAGE_SHIFT;
+		mark_page_dirty(vcpu->kvm, gfn);
 		guest_buf = (hva_t)kmap_atomic(
 					pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
 		offset = addr & ~PAGE_MASK;
@@ -953,6 +956,7 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 		return 0;
 	page = gfn_to_page(m, gpa >> PAGE_SHIFT);
 	kvm_mmu_pre_write(vcpu, gpa, bytes);
+	mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
 	virt = kmap_atomic(page, KM_USER0);
 	memcpy(virt + offset_in_page(gpa), &val, bytes);
 	kunmap_atomic(virt, KM_USER0);
@@ -1294,6 +1298,7 @@ static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
 	if (is_error_hpa(para_state_hpa))
 		goto err_gp;

+	mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
 	para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
 	para_state = kmap_atomic(para_state_page, KM_USER0);

@@ -1323,6 +1328,7 @@ static int vcpu_register_para(struct kvm_vcpu *vcpu, gpa_t para_state_gpa)
 	vcpu->para_state_gpa = para_state_gpa;
 	vcpu->hypercall_gpa = hypercall_gpa;

+	mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
 	hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
 				KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
 	kvm_arch_ops->patch_hypercall(vcpu, hypercall);