author	Marcelo Tosatti <marcelo@kvack.org>	2008-03-29 19:17:59 -0400
committer	Avi Kivity <avi@qumranet.com>	2008-04-27 05:00:52 -0400
commit	3200f405a1e8e06c8634f11d33614455baa4e6be (patch)
tree	806116d2495dd7fd93b5c0db98a72fe4fa854787
parent	25c5f225beda4fbea878ed8b6203ab4ecc7de2d1 (diff)
KVM: MMU: unify slots_lock usage
Unify slots_lock acquisition around vcpu_run(). This is simpler and less
error-prone. Also fix some callsites that were not grabbing the lock
properly.

[avi: drop slots_lock while in guest mode to avoid holding the lock for
indefinite periods]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
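The locking rule the patch establishes — take slots_lock for read once around the vcpu run loop, and drop it only across guest entry and blocking — can be sketched in plain C. Below is a minimal user-space analogue, not kernel code: a pthread rwlock stands in for kvm->slots_lock, and run_vcpu(), enter_guest(), and handle_exit() are hypothetical stand-ins for __vcpu_run() and its helpers.

#include <pthread.h>
#include <stdbool.h>

/* Stand-in for kvm->slots_lock (readers: vcpus; writers: memslot updates). */
static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER;

static void enter_guest(void) { /* placeholder for guest entry */ }
static bool handle_exit(int *iters) { return --*iters > 0; /* placeholder */ }

static int run_vcpu(void)
{
	int iters = 3;
	bool again = true;

	/* Held for read across everything that touches the memslot array. */
	pthread_rwlock_rdlock(&slots_lock);
	while (again) {
		/*
		 * Dropped around guest mode, which can last indefinitely,
		 * so memslot writers are never blocked behind a vcpu.
		 */
		pthread_rwlock_unlock(&slots_lock);
		enter_guest();
		pthread_rwlock_rdlock(&slots_lock);

		again = handle_exit(&iters);
	}
	pthread_rwlock_unlock(&slots_lock);
	return 0;
}

int main(void) { return run_vcpu(); }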
-rw-r--r--	arch/x86/kvm/mmu.c	13
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	4
-rw-r--r--	arch/x86/kvm/vmx.c	6
-rw-r--r--	arch/x86/kvm/x86.c	53
-rw-r--r--	include/asm-x86/kvm_host.h	2
5 files changed, 26 insertions(+), 52 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6fc342194dda..c563283cb982 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1204,8 +1204,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
 	struct page *page;
 
-	down_read(&vcpu->kvm->slots_lock);
-
 	down_read(&current->mm->mmap_sem);
 	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
 		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
@@ -1218,7 +1216,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	/* mmio */
 	if (is_error_page(page)) {
 		kvm_release_page_clean(page);
-		up_read(&vcpu->kvm->slots_lock);
 		return 1;
 	}
 
@@ -1228,7 +1225,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 			 PT32E_ROOT_LEVEL);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
-	up_read(&vcpu->kvm->slots_lock);
 
 	return r;
 }
@@ -1376,9 +1372,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 		largepage = 1;
 	}
 	page = gfn_to_page(vcpu->kvm, gfn);
+	up_read(&current->mm->mmap_sem);
 	if (is_error_page(page)) {
 		kvm_release_page_clean(page);
-		up_read(&current->mm->mmap_sem);
 		return 1;
 	}
 	spin_lock(&vcpu->kvm->mmu_lock);
@@ -1386,7 +1382,6 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
 			 largepage, gfn, page, TDP_ROOT_LEVEL);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	up_read(&current->mm->mmap_sem);
 
 	return r;
 }
@@ -1808,9 +1803,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t gpa;
 	int r;
 
-	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
-	up_read(&vcpu->kvm->slots_lock);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -2063,7 +2056,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 	if (r)
 		return r;
 
-	if (!__emulator_write_phys(vcpu, addr, &value, bytes))
+	if (!emulator_write_phys(vcpu, addr, &value, bytes))
 		return -EFAULT;
 
 	return 1;
@@ -2127,7 +2120,6 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 	int r;
 	struct kvm_pv_mmu_op_buffer buffer;
 
-	down_read(&vcpu->kvm->slots_lock);
 	down_read(&current->mm->mmap_sem);
 
 	buffer.ptr = buffer.buf;
@@ -2150,7 +2142,6 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 out:
 	*ret = buffer.processed;
 	up_read(&current->mm->mmap_sem);
-	up_read(&vcpu->kvm->slots_lock);
 	return r;
 }
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index e9ae5dba724e..57d872aec663 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -388,7 +388,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (r)
 		return r;
 
-	down_read(&vcpu->kvm->slots_lock);
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */
@@ -402,7 +401,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		pgprintk("%s: guest page fault\n", __func__);
 		inject_page_fault(vcpu, addr, walker.error_code);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-		up_read(&vcpu->kvm->slots_lock);
 		return 0;
 	}
 
@@ -422,7 +420,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (is_error_page(page)) {
 		pgprintk("gfn %x is mmio\n", walker.gfn);
 		kvm_release_page_clean(page);
-		up_read(&vcpu->kvm->slots_lock);
 		return 1;
 	}
 
@@ -440,7 +437,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, "post page fault (fixed)");
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	up_read(&vcpu->kvm->slots_lock);
 
 	return write_pt;
 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 87eee7a7f16e..6249810b2155 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1505,7 +1505,6 @@ static int init_rmode_tss(struct kvm *kvm)
 	int ret = 0;
 	int r;
 
-	down_read(&kvm->slots_lock);
 	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
 	if (r < 0)
 		goto out;
@@ -1528,7 +1527,6 @@ static int init_rmode_tss(struct kvm *kvm)
 
 	ret = 1;
 out:
-	up_read(&kvm->slots_lock);
 	return ret;
 }
 
@@ -1730,6 +1728,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	u64 msr;
 	int ret;
 
+	down_read(&vcpu->kvm->slots_lock);
 	if (!init_rmode_tss(vmx->vcpu.kvm)) {
 		ret = -ENOMEM;
 		goto out;
@@ -1833,9 +1832,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	vpid_sync_vcpu_all(vmx);
 
-	return 0;
+	ret = 0;
 
 out:
+	up_read(&vcpu->kvm->slots_lock);
 	return ret;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 32d910044f85..e6a38bf9a45e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -201,7 +201,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 	int ret;
 	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 
-	down_read(&vcpu->kvm->slots_lock);
 	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
 				  offset * sizeof(u64), sizeof(pdpte));
 	if (ret < 0) {
@@ -218,7 +217,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
 out:
-	up_read(&vcpu->kvm->slots_lock);
 
 	return ret;
 }
@@ -233,13 +231,11 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 	if (is_long_mode(vcpu) || !is_pae(vcpu))
 		return false;
 
-	down_read(&vcpu->kvm->slots_lock);
 	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
 	if (r < 0)
 		goto out;
 	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
 out:
-	up_read(&vcpu->kvm->slots_lock);
 
 	return changed;
 }
@@ -377,7 +373,6 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		 */
 	}
 
-	down_read(&vcpu->kvm->slots_lock);
 	/*
 	 * Does the new cr3 value map to physical memory? (Note, we
 	 * catch an invalid cr3 even in real-mode, because it would
@@ -393,7 +388,6 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 		vcpu->arch.cr3 = cr3;
 		vcpu->arch.mmu.new_cr3(vcpu);
 	}
-	up_read(&vcpu->kvm->slots_lock);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr3);
 
@@ -503,7 +497,6 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 
 	version++;
 
-	down_read(&kvm->slots_lock);
 	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
 
 	wc_ts = current_kernel_time();
@@ -515,7 +508,6 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 
 	version++;
 	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
-	up_read(&kvm->slots_lock);
 }
 
 static void kvm_write_guest_time(struct kvm_vcpu *v)
@@ -609,10 +601,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		vcpu->arch.hv_clock.tsc_shift = 22;
 
 		down_read(&current->mm->mmap_sem);
-		down_read(&vcpu->kvm->slots_lock);
 		vcpu->arch.time_page =
 				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-		up_read(&vcpu->kvm->slots_lock);
 		up_read(&current->mm->mmap_sem);
 
 		if (is_error_page(vcpu->arch.time_page)) {
@@ -715,9 +705,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
 
 	vcpu_load(vcpu);
 
+	down_read(&vcpu->kvm->slots_lock);
 	for (i = 0; i < msrs->nmsrs; ++i)
 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
 			break;
+	up_read(&vcpu->kvm->slots_lock);
 
 	vcpu_put(vcpu);
 
@@ -1768,7 +1760,6 @@ int emulator_read_std(unsigned long addr,
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
 
-	down_read(&vcpu->kvm->slots_lock);
 	while (bytes) {
 		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 		unsigned offset = addr & (PAGE_SIZE-1);
@@ -1790,7 +1781,6 @@ int emulator_read_std(unsigned long addr,
 		addr += tocopy;
 	}
 out:
-	up_read(&vcpu->kvm->slots_lock);
 	return r;
 }
 EXPORT_SYMBOL_GPL(emulator_read_std);
@@ -1809,9 +1799,7 @@ static int emulator_read_emulated(unsigned long addr,
 		return X86EMUL_CONTINUE;
 	}
 
-	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
-	up_read(&vcpu->kvm->slots_lock);
 
 	/* For APIC access vmexit */
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1844,7 +1832,7 @@ mmio:
 	return X86EMUL_UNHANDLEABLE;
 }
 
-int __emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			  const void *val, int bytes)
 {
 	int ret;
@@ -1856,17 +1844,6 @@ int __emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 	return 1;
 }
 
-static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
-			       const void *val, int bytes)
-{
-	int ret;
-
-	down_read(&vcpu->kvm->slots_lock);
-	ret = __emulator_write_phys(vcpu, gpa, val, bytes);
-	up_read(&vcpu->kvm->slots_lock);
-	return ret;
-}
-
 static int emulator_write_emulated_onepage(unsigned long addr,
 					   const void *val,
 					   unsigned int bytes,
@@ -1875,9 +1852,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 	struct kvm_io_device *mmio_dev;
 	gpa_t gpa;
 
-	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
-	up_read(&vcpu->kvm->slots_lock);
 
 	if (gpa == UNMAPPED_GVA) {
 		kvm_inject_page_fault(vcpu, addr, 2);
@@ -1954,7 +1929,6 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 		char *kaddr;
 		u64 val;
 
-		down_read(&vcpu->kvm->slots_lock);
 		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 
 		if (gpa == UNMAPPED_GVA ||
@@ -1974,9 +1948,8 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 		set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
 		kunmap_atomic(kaddr, KM_USER0);
 		kvm_release_page_dirty(page);
-	emul_write:
-		up_read(&vcpu->kvm->slots_lock);
 	}
+emul_write:
 #endif
 
 	return emulator_write_emulated(addr, new, bytes, vcpu);
@@ -2368,10 +2341,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
 
 	for (i = 0; i < nr_pages; ++i) {
-		down_read(&vcpu->kvm->slots_lock);
 		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
 		vcpu->arch.pio.guest_pages[i] = page;
-		up_read(&vcpu->kvm->slots_lock);
 		if (!page) {
 			kvm_inject_gp(vcpu, 0);
 			free_pio_guest_pages(vcpu);
@@ -2445,7 +2416,9 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 	++vcpu->stat.halt_exits;
 	if (irqchip_in_kernel(vcpu->kvm)) {
 		vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
+		up_read(&vcpu->kvm->slots_lock);
 		kvm_vcpu_block(vcpu);
+		down_read(&vcpu->kvm->slots_lock);
 		if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
 			return -EINTR;
 		return 1;
@@ -2738,6 +2711,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
 	}
 
+	down_read(&vcpu->kvm->slots_lock);
 	vapic_enter(vcpu);
 
 preempted:
@@ -2811,6 +2785,8 @@ again:
 
 	kvm_lapic_sync_to_vapic(vcpu);
 
+	up_read(&vcpu->kvm->slots_lock);
+
 	vcpu->guest_mode = 1;
 	kvm_guest_enter();
 
@@ -2837,6 +2813,8 @@ again:
 
 	preempt_enable();
 
+	down_read(&vcpu->kvm->slots_lock);
+
 	/*
 	 * Profile KVM exit RIPs:
 	 */
@@ -2864,14 +2842,18 @@ again:
 	}
 
 out:
+	up_read(&vcpu->kvm->slots_lock);
 	if (r > 0) {
 		kvm_resched(vcpu);
+		down_read(&vcpu->kvm->slots_lock);
 		goto preempted;
 	}
 
 	post_kvm_run_save(vcpu, kvm_run);
 
+	down_read(&vcpu->kvm->slots_lock);
 	vapic_exit(vcpu);
+	up_read(&vcpu->kvm->slots_lock);
 
 	return r;
 }
@@ -2906,9 +2888,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
 		vcpu->mmio_read_completed = 1;
 		vcpu->mmio_needed = 0;
+
+		down_read(&vcpu->kvm->slots_lock);
 		r = emulate_instruction(vcpu, kvm_run,
 					vcpu->arch.mmio_fault_cr2, 0,
 					EMULTYPE_NO_DECODE);
+		up_read(&vcpu->kvm->slots_lock);
 		if (r == EMULATE_DO_MMIO) {
 			/*
 			 * Read-modify-write. Back to userspace.
@@ -3817,7 +3802,9 @@ fail:
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
 	kvm_free_lapic(vcpu);
+	down_read(&vcpu->kvm->slots_lock);
 	kvm_mmu_destroy(vcpu);
+	up_read(&vcpu->kvm->slots_lock);
 	free_page((unsigned long)vcpu->arch.pio_data);
 }
 
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 7b28cf949d55..2b081ed44fdb 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -446,7 +446,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 
-int __emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			  const void *val, int bytes);
 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 		  gpa_t addr, unsigned long *ret);