Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
 arch/x86/kvm/paging_tmpl.h | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 03ba8608fe0f..ecc0856268c4 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -91,7 +91,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
 	pt_element_t *table;
 	struct page *page;
 
+	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(kvm, table_gfn);
+	up_read(&current->mm->mmap_sem);
+
 	table = kmap_atomic(page, KM_USER0);
 
 	ret = CMPXCHG(&table[index], orig_pte, new_pte);
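gfn_to_page() resolves the gfn through the memslots and ultimately through get_user_pages(), which must be called with current->mm->mmap_sem held for read; with the fault path below switching to slots_lock, cmpxchg_gpte() now takes mmap_sem itself. The read section must also end before kmap_atomic(), which disables preemption. A minimal sketch of the resulting body, assuming this era's two-argument kmap_atomic()/kunmap_atomic() and that the page reference is dropped the way the rest of this file does:

	down_read(&current->mm->mmap_sem);	/* get_user_pages() can sleep */
	page = gfn_to_page(kvm, table_gfn);
	up_read(&current->mm->mmap_sem);	/* release before atomic context */

	table = kmap_atomic(page, KM_USER0);	/* preemption disabled from here */
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);
	kvm_release_page_dirty(page);		/* drop the gfn_to_page() reference */

	return ret == orig_pte;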
@@ -140,7 +143,7 @@ walk:
 	}
 #endif
 	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
-	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
+	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
 
 	pt_access = ACC_ALL;
 
@@ -297,7 +300,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		u64 shadow_pte;
 		int metaphysical;
 		gfn_t table_gfn;
-		bool new_page = 0;
 
 		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
 		if (level == PT_PAGE_TABLE_LEVEL)
@@ -319,8 +321,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		}
 		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
 					       metaphysical, access,
-					       shadow_ent, &new_page);
-		if (new_page && !metaphysical) {
+					       shadow_ent);
+		if (!metaphysical) {
 			int r;
 			pt_element_t curr_pte;
 			r = kvm_read_guest_atomic(vcpu->kvm,
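With the &new_page out-parameter gone, kvm_mmu_get_page() no longer reports whether it allocated a fresh shadow page, so the guest PTE is now revalidated on every pass through a non-metaphysical level instead of only for newly created pages. kvm_read_guest_atomic() is used because this runs under mmu_lock, where sleeping is not allowed. A hedged sketch of the check, with pte_gpa and cached_pte as illustrative stand-ins for the values saved by the guest walker:

	pt_element_t curr_pte;
	int r;

	/* Re-read the guest PTE without sleeping; mmu_lock is held. */
	r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
				  &curr_pte, sizeof(curr_pte));
	if (r || curr_pte != cached_pte)
		return NULL;	/* PTE changed under us; let the fault retry */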
@@ -378,7 +380,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (r)
 		return r;
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */
@@ -392,11 +394,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		pgprintk("%s: guest page fault\n", __FUNCTION__);
 		inject_page_fault(vcpu, addr, walker.error_code);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-		up_read(&current->mm->mmap_sem);
+		up_read(&vcpu->kvm->slots_lock);
 		return 0;
 	}
 
+	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(vcpu->kvm, walker.gfn);
+	up_read(&current->mm->mmap_sem);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
@@ -413,14 +417,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	 */
 	if (shadow_pte && is_io_pte(*shadow_pte)) {
 		spin_unlock(&vcpu->kvm->mmu_lock);
-		up_read(&current->mm->mmap_sem);
+		up_read(&vcpu->kvm->slots_lock);
 		return 1;
 	}
 
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, "post page fault (fixed)");
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	return write_pt;
 }
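Taken together, the page_fault() hunks split one big lock into three with a fixed order: vcpu->kvm->slots_lock, an rwsem protecting the memslot layout, is read-held across the whole fault; current->mm->mmap_sem is narrowed to just the gfn_to_page() host lookup; and the mmu_lock spinlock is taken only after the sleeping mmap_sem section has ended. A minimal sketch of that ordering, assuming this commit's lock names, with the elided steps marked:

	down_read(&vcpu->kvm->slots_lock);	/* memslot layout stable from here */
	/* ... walk the guest page tables, inject a fault on failure ... */

	down_read(&current->mm->mmap_sem);	/* only around the host-side lookup */
	page = gfn_to_page(vcpu->kvm, walker.gfn);
	up_read(&current->mm->mmap_sem);

	spin_lock(&vcpu->kvm->mmu_lock);	/* no sleeping past this point */
	/* ... build or fix the shadow pte for the faulting address ... */
	spin_unlock(&vcpu->kvm->mmu_lock);

	up_read(&vcpu->kvm->slots_lock);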