Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kvm/mmu.c	24
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	13
-rw-r--r--	arch/x86/kvm/vmx.c	7
-rw-r--r--	arch/x86/kvm/x86.c	65
4 files changed, 71 insertions, 38 deletions
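Every hunk below applies the same locking convention: lookups that read the memory-slot array (vcpu->arch.mmu.gva_to_gpa() and other memslot walks) move from current->mm->mmap_sem to the new kvm->slots_lock, while mmap_sem is retaken only around gfn_to_page(), whose get_user_pages() path still requires it. A minimal sketch of the resulting pattern; example_translate_and_pin() is an illustrative helper, not a function from this patch:

/*
 * Illustrative sketch of the locking convention this patch introduces.
 * Memslot lookups are serialized by kvm->slots_lock; mmap_sem is held
 * only across the gfn_to_page() -> get_user_pages() step.
 */
static struct page *example_translate_and_pin(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	struct page *page;

	down_read(&vcpu->kvm->slots_lock);	/* was: current->mm->mmap_sem */
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);	/* reads memslots */
	up_read(&vcpu->kvm->slots_lock);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	down_read(&current->mm->mmap_sem);	/* gfn_to_page() does get_user_pages() */
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	return page;
}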
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8efdcdbebb03..26037106ad19 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -876,11 +876,18 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
+	struct page *page;
+
 	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
 	if (gpa == UNMAPPED_GVA)
 		return NULL;
-	return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+
+	down_read(&current->mm->mmap_sem);
+	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+	up_read(&current->mm->mmap_sem);
+
+	return page;
 }
 
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
@@ -1020,15 +1027,18 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
 	struct page *page;
 
+	down_read(&vcpu->kvm->slots_lock);
+
 	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(vcpu->kvm, gfn);
+	up_read(&current->mm->mmap_sem);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	r = __nonpaging_map(vcpu, v, write, gfn, page);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	return r;
 }
@@ -1362,6 +1372,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn_t gfn;
 	int r;
 	u64 gpte = 0;
+	struct page *page;
 
 	if (bytes != 4 && bytes != 8)
 		return;
@@ -1389,6 +1400,11 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	if (!is_present_pte(gpte))
 		return;
 	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+
+	down_read(&current->mm->mmap_sem);
+	page = gfn_to_page(vcpu->kvm, gfn);
+	up_read(&current->mm->mmap_sem);
+
 	vcpu->arch.update_pte.gfn = gfn;
-	vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
+	vcpu->arch.update_pte.page = page;
 }
@@ -1496,9 +1512,9 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t gpa;
 	int r;
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 03ba8608fe0f..2009c6e9dc4d 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -91,7 +91,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
 	pt_element_t *table;
 	struct page *page;
 
+	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(kvm, table_gfn);
+	up_read(&current->mm->mmap_sem);
+
 	table = kmap_atomic(page, KM_USER0);
 
 	ret = CMPXCHG(&table[index], orig_pte, new_pte);
@@ -378,7 +381,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (r)
 		return r;
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */
@@ -392,11 +395,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		pgprintk("%s: guest page fault\n", __FUNCTION__);
 		inject_page_fault(vcpu, addr, walker.error_code);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-		up_read(&current->mm->mmap_sem);
+		up_read(&vcpu->kvm->slots_lock);
 		return 0;
 	}
 
+	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(vcpu->kvm, walker.gfn);
+	up_read(&current->mm->mmap_sem);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
@@ -413,14 +418,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	 */
 	if (shadow_pte && is_io_pte(*shadow_pte)) {
 		spin_unlock(&vcpu->kvm->mmu_lock);
-		up_read(&current->mm->mmap_sem);
+		up_read(&vcpu->kvm->slots_lock);
 		return 1;
 	}
 
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, "post page fault (fixed)");
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	return write_pt;
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ad36447e696e..86f5bf121838 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1477,7 +1477,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 	struct kvm_userspace_memory_region kvm_userspace_mem;
 	int r = 0;
 
-	down_write(&current->mm->mmap_sem);
+	down_write(&kvm->slots_lock);
 	if (kvm->arch.apic_access_page)
 		goto out;
 	kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
@@ -1487,9 +1487,12 @@ static int alloc_apic_access_page(struct kvm *kvm)
 	r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
 	if (r)
 		goto out;
+
+	down_read(&current->mm->mmap_sem);
 	kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
+	up_read(&current->mm->mmap_sem);
 out:
-	up_write(&current->mm->mmap_sem);
+	up_write(&kvm->slots_lock);
 	return r;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 338764fa5391..6b01552bd1f1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -184,7 +184,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	int ret;
 	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
 				  offset * sizeof(u64), sizeof(pdpte));
 	if (ret < 0) {
@@ -201,7 +201,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
 out:
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	return ret;
 }
@@ -215,13 +215,13 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 	if (is_long_mode(vcpu) || !is_pae(vcpu))
 		return false;
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
 	if (r < 0)
 		goto out;
 	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
 out:
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	return changed;
 }
@@ -359,7 +359,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 	 */
 	}
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	/*
 	 * Does the new cr3 value map to physical memory? (Note, we
 	 * catch an invalid cr3 even in real-mode, because it would
@@ -375,7 +375,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		vcpu->arch.cr3 = cr3;
 		vcpu->arch.mmu.new_cr3(vcpu);
 	}
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 }
 EXPORT_SYMBOL_GPL(set_cr3);
 
@@ -1232,12 +1232,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
 		return -EINVAL;
 
-	down_write(&current->mm->mmap_sem);
+	down_write(&kvm->slots_lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
-	up_write(&current->mm->mmap_sem);
+	up_write(&kvm->slots_lock);
 	return 0;
 }
 
@@ -1286,7 +1286,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 	    < alias->target_phys_addr)
 		goto out;
 
-	down_write(&current->mm->mmap_sem);
+	down_write(&kvm->slots_lock);
 
 	p = &kvm->arch.aliases[alias->slot];
 	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -1300,7 +1300,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 
 	kvm_mmu_zap_all(kvm);
 
-	up_write(&current->mm->mmap_sem);
+	up_write(&kvm->slots_lock);
 
 	return 0;
 
@@ -1376,7 +1376,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
-	down_write(&current->mm->mmap_sem);
+	down_write(&kvm->slots_lock);
 
 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
 	if (r)
@@ -1392,7 +1392,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	}
 	r = 0;
 out:
-	up_write(&current->mm->mmap_sem);
+	up_write(&kvm->slots_lock);
 	return r;
 }
 
@@ -1570,7 +1570,7 @@ int emulator_read_std(unsigned long addr,
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	while (bytes) {
 		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 		unsigned offset = addr & (PAGE_SIZE-1);
@@ -1592,7 +1592,7 @@ int emulator_read_std(unsigned long addr,
 		addr += tocopy;
 	}
 out:
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 	return r;
 }
 EXPORT_SYMBOL_GPL(emulator_read_std);
@@ -1611,9 +1611,9 @@ static int emulator_read_emulated(unsigned long addr,
 		return X86EMUL_CONTINUE;
 	}
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	/* For APIC access vmexit */
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1651,14 +1651,14 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
 	int ret;
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
 	if (ret < 0) {
-		up_read(&current->mm->mmap_sem);
+		up_read(&vcpu->kvm->slots_lock);
 		return 0;
 	}
 	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 	return 1;
 }
 
@@ -1670,9 +1670,9 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 	struct kvm_io_device *mmio_dev;
 	gpa_t gpa;
 
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 
 	if (gpa == UNMAPPED_GVA) {
 		kvm_inject_page_fault(vcpu, addr, 2);
@@ -1749,7 +1749,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 		char *kaddr;
 		u64 val;
 
-		down_read(&current->mm->mmap_sem);
+		down_read(&vcpu->kvm->slots_lock);
 		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 
 		if (gpa == UNMAPPED_GVA ||
@@ -1760,13 +1760,17 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 			goto emul_write;
 
 		val = *(u64 *)new;
+
+		down_read(&current->mm->mmap_sem);
 		page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+		up_read(&current->mm->mmap_sem);
+
 		kaddr = kmap_atomic(page, KM_USER0);
 		set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
 		kunmap_atomic(kaddr, KM_USER0);
 		kvm_release_page_dirty(page);
 	emul_write:
-		up_read(&current->mm->mmap_sem);
+		up_read(&vcpu->kvm->slots_lock);
 	}
 #endif
 
@@ -2159,10 +2163,10 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 		kvm_x86_ops->skip_emulated_instruction(vcpu);
 
 	for (i = 0; i < nr_pages; ++i) {
-		down_read(&current->mm->mmap_sem);
+		down_read(&vcpu->kvm->slots_lock);
 		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
 		vcpu->arch.pio.guest_pages[i] = page;
-		up_read(&current->mm->mmap_sem);
+		up_read(&vcpu->kvm->slots_lock);
 		if (!page) {
 			kvm_inject_gp(vcpu, 0);
 			free_pio_guest_pages(vcpu);
@@ -2485,8 +2489,9 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
 
 	down_read(&current->mm->mmap_sem);
 	page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-	vcpu->arch.apic->vapic_page = page;
 	up_read(&current->mm->mmap_sem);
+
+	vcpu->arch.apic->vapic_page = page;
 }
 
 static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -2959,9 +2964,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 	gpa_t gpa;
 
 	vcpu_load(vcpu);
-	down_read(&current->mm->mmap_sem);
+	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
-	up_read(&current->mm->mmap_sem);
+	up_read(&vcpu->kvm->slots_lock);
 	tr->physical_address = gpa;
 	tr->valid = gpa != UNMAPPED_GVA;
 	tr->writeable = 1;
@@ -3234,11 +3239,13 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	 */
 	if (!user_alloc) {
 		if (npages && !old.rmap) {
+			down_write(&current->mm->mmap_sem);
 			memslot->userspace_addr = do_mmap(NULL, 0,
 						npages * PAGE_SIZE,
 						PROT_READ | PROT_WRITE,
 						MAP_SHARED | MAP_ANONYMOUS,
 						0);
+			up_write(&current->mm->mmap_sem);
 
 			if (IS_ERR((void *)memslot->userspace_addr))
 				return PTR_ERR((void *)memslot->userspace_addr);
@@ -3246,8 +3253,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		if (!old.user_alloc && old.rmap) {
 			int ret;
 
+			down_write(&current->mm->mmap_sem);
 			ret = do_munmap(current->mm, old.userspace_addr,
 					old.npages * PAGE_SIZE);
+			up_write(&current->mm->mmap_sem);
 			if (ret < 0)
 				printk(KERN_WARNING
 				       "kvm_vm_ioctl_set_memory_region: "