about summary refs log tree commit diff stats
path: root/arch/x86/kvm/x86.c
diff options
context:
space:
mode:
authorMarcelo Tosatti <marcelo@kvack.org>2008-03-29 19:17:59 -0400
committerAvi Kivity <avi@qumranet.com>2008-04-27 05:00:52 -0400
commit3200f405a1e8e06c8634f11d33614455baa4e6be (patch)
tree806116d2495dd7fd93b5c0db98a72fe4fa854787 /arch/x86/kvm/x86.c
parent25c5f225beda4fbea878ed8b6203ab4ecc7de2d1 (diff)
KVM: MMU: unify slots_lock usage
Unify slots_lock acquision around vcpu_run(). This is simpler and less error-prone. Also fix some callsites that were not grabbing the lock properly. [avi: drop slots_lock while in guest mode to avoid holding the lock for indefinite periods] Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com> Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--arch/x86/kvm/x86.c53
1 file changed, 20 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 32d910044f85..e6a38bf9a45e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -201,7 +201,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
201 int ret; 201 int ret;
202 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)]; 202 u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
203 203
204 down_read(&vcpu->kvm->slots_lock);
205 ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte, 204 ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
206 offset * sizeof(u64), sizeof(pdpte)); 205 offset * sizeof(u64), sizeof(pdpte));
207 if (ret < 0) { 206 if (ret < 0) {
@@ -218,7 +217,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
218 217
219 memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs)); 218 memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
220out: 219out:
221 up_read(&vcpu->kvm->slots_lock);
222 220
223 return ret; 221 return ret;
224} 222}
@@ -233,13 +231,11 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
233 if (is_long_mode(vcpu) || !is_pae(vcpu)) 231 if (is_long_mode(vcpu) || !is_pae(vcpu))
234 return false; 232 return false;
235 233
236 down_read(&vcpu->kvm->slots_lock);
237 r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte)); 234 r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
238 if (r < 0) 235 if (r < 0)
239 goto out; 236 goto out;
240 changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0; 237 changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
241out: 238out:
242 up_read(&vcpu->kvm->slots_lock);
243 239
244 return changed; 240 return changed;
245} 241}
@@ -377,7 +373,6 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
377 */ 373 */
378 } 374 }
379 375
380 down_read(&vcpu->kvm->slots_lock);
381 /* 376 /*
382 * Does the new cr3 value map to physical memory? (Note, we 377 * Does the new cr3 value map to physical memory? (Note, we
383 * catch an invalid cr3 even in real-mode, because it would 378 * catch an invalid cr3 even in real-mode, because it would
@@ -393,7 +388,6 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
393 vcpu->arch.cr3 = cr3; 388 vcpu->arch.cr3 = cr3;
394 vcpu->arch.mmu.new_cr3(vcpu); 389 vcpu->arch.mmu.new_cr3(vcpu);
395 } 390 }
396 up_read(&vcpu->kvm->slots_lock);
397} 391}
398EXPORT_SYMBOL_GPL(kvm_set_cr3); 392EXPORT_SYMBOL_GPL(kvm_set_cr3);
399 393
@@ -503,7 +497,6 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
503 497
504 version++; 498 version++;
505 499
506 down_read(&kvm->slots_lock);
507 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); 500 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
508 501
509 wc_ts = current_kernel_time(); 502 wc_ts = current_kernel_time();
@@ -515,7 +508,6 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
515 508
516 version++; 509 version++;
517 kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); 510 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
518 up_read(&kvm->slots_lock);
519} 511}
520 512
521static void kvm_write_guest_time(struct kvm_vcpu *v) 513static void kvm_write_guest_time(struct kvm_vcpu *v)
@@ -609,10 +601,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
609 vcpu->arch.hv_clock.tsc_shift = 22; 601 vcpu->arch.hv_clock.tsc_shift = 22;
610 602
611 down_read(&current->mm->mmap_sem); 603 down_read(&current->mm->mmap_sem);
612 down_read(&vcpu->kvm->slots_lock);
613 vcpu->arch.time_page = 604 vcpu->arch.time_page =
614 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); 605 gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
615 up_read(&vcpu->kvm->slots_lock);
616 up_read(&current->mm->mmap_sem); 606 up_read(&current->mm->mmap_sem);
617 607
618 if (is_error_page(vcpu->arch.time_page)) { 608 if (is_error_page(vcpu->arch.time_page)) {
@@ -715,9 +705,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
715 705
716 vcpu_load(vcpu); 706 vcpu_load(vcpu);
717 707
708 down_read(&vcpu->kvm->slots_lock);
718 for (i = 0; i < msrs->nmsrs; ++i) 709 for (i = 0; i < msrs->nmsrs; ++i)
719 if (do_msr(vcpu, entries[i].index, &entries[i].data)) 710 if (do_msr(vcpu, entries[i].index, &entries[i].data))
720 break; 711 break;
712 up_read(&vcpu->kvm->slots_lock);
721 713
722 vcpu_put(vcpu); 714 vcpu_put(vcpu);
723 715
@@ -1768,7 +1760,6 @@ int emulator_read_std(unsigned long addr,
1768 void *data = val; 1760 void *data = val;
1769 int r = X86EMUL_CONTINUE; 1761 int r = X86EMUL_CONTINUE;
1770 1762
1771 down_read(&vcpu->kvm->slots_lock);
1772 while (bytes) { 1763 while (bytes) {
1773 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 1764 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1774 unsigned offset = addr & (PAGE_SIZE-1); 1765 unsigned offset = addr & (PAGE_SIZE-1);
@@ -1790,7 +1781,6 @@ int emulator_read_std(unsigned long addr,
1790 addr += tocopy; 1781 addr += tocopy;
1791 } 1782 }
1792out: 1783out:
1793 up_read(&vcpu->kvm->slots_lock);
1794 return r; 1784 return r;
1795} 1785}
1796EXPORT_SYMBOL_GPL(emulator_read_std); 1786EXPORT_SYMBOL_GPL(emulator_read_std);
@@ -1809,9 +1799,7 @@ static int emulator_read_emulated(unsigned long addr,
1809 return X86EMUL_CONTINUE; 1799 return X86EMUL_CONTINUE;
1810 } 1800 }
1811 1801
1812 down_read(&vcpu->kvm->slots_lock);
1813 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 1802 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1814 up_read(&vcpu->kvm->slots_lock);
1815 1803
1816 /* For APIC access vmexit */ 1804 /* For APIC access vmexit */
1817 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 1805 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1844,7 +1832,7 @@ mmio:
1844 return X86EMUL_UNHANDLEABLE; 1832 return X86EMUL_UNHANDLEABLE;
1845} 1833}
1846 1834
1847int __emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, 1835int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1848 const void *val, int bytes) 1836 const void *val, int bytes)
1849{ 1837{
1850 int ret; 1838 int ret;
@@ -1856,17 +1844,6 @@ int __emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1856 return 1; 1844 return 1;
1857} 1845}
1858 1846
1859static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1860 const void *val, int bytes)
1861{
1862 int ret;
1863
1864 down_read(&vcpu->kvm->slots_lock);
1865 ret =__emulator_write_phys(vcpu, gpa, val, bytes);
1866 up_read(&vcpu->kvm->slots_lock);
1867 return ret;
1868}
1869
1870static int emulator_write_emulated_onepage(unsigned long addr, 1847static int emulator_write_emulated_onepage(unsigned long addr,
1871 const void *val, 1848 const void *val,
1872 unsigned int bytes, 1849 unsigned int bytes,
@@ -1875,9 +1852,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
1875 struct kvm_io_device *mmio_dev; 1852 struct kvm_io_device *mmio_dev;
1876 gpa_t gpa; 1853 gpa_t gpa;
1877 1854
1878 down_read(&vcpu->kvm->slots_lock);
1879 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 1855 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1880 up_read(&vcpu->kvm->slots_lock);
1881 1856
1882 if (gpa == UNMAPPED_GVA) { 1857 if (gpa == UNMAPPED_GVA) {
1883 kvm_inject_page_fault(vcpu, addr, 2); 1858 kvm_inject_page_fault(vcpu, addr, 2);
@@ -1954,7 +1929,6 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
1954 char *kaddr; 1929 char *kaddr;
1955 u64 val; 1930 u64 val;
1956 1931
1957 down_read(&vcpu->kvm->slots_lock);
1958 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr); 1932 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
1959 1933
1960 if (gpa == UNMAPPED_GVA || 1934 if (gpa == UNMAPPED_GVA ||
@@ -1974,9 +1948,8 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
1974 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val); 1948 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
1975 kunmap_atomic(kaddr, KM_USER0); 1949 kunmap_atomic(kaddr, KM_USER0);
1976 kvm_release_page_dirty(page); 1950 kvm_release_page_dirty(page);
1977 emul_write:
1978 up_read(&vcpu->kvm->slots_lock);
1979 } 1951 }
1952emul_write:
1980#endif 1953#endif
1981 1954
1982 return emulator_write_emulated(addr, new, bytes, vcpu); 1955 return emulator_write_emulated(addr, new, bytes, vcpu);
@@ -2368,10 +2341,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
2368 kvm_x86_ops->skip_emulated_instruction(vcpu); 2341 kvm_x86_ops->skip_emulated_instruction(vcpu);
2369 2342
2370 for (i = 0; i < nr_pages; ++i) { 2343 for (i = 0; i < nr_pages; ++i) {
2371 down_read(&vcpu->kvm->slots_lock);
2372 page = gva_to_page(vcpu, address + i * PAGE_SIZE); 2344 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
2373 vcpu->arch.pio.guest_pages[i] = page; 2345 vcpu->arch.pio.guest_pages[i] = page;
2374 up_read(&vcpu->kvm->slots_lock);
2375 if (!page) { 2346 if (!page) {
2376 kvm_inject_gp(vcpu, 0); 2347 kvm_inject_gp(vcpu, 0);
2377 free_pio_guest_pages(vcpu); 2348 free_pio_guest_pages(vcpu);
@@ -2445,7 +2416,9 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
2445 ++vcpu->stat.halt_exits; 2416 ++vcpu->stat.halt_exits;
2446 if (irqchip_in_kernel(vcpu->kvm)) { 2417 if (irqchip_in_kernel(vcpu->kvm)) {
2447 vcpu->arch.mp_state = VCPU_MP_STATE_HALTED; 2418 vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
2419 up_read(&vcpu->kvm->slots_lock);
2448 kvm_vcpu_block(vcpu); 2420 kvm_vcpu_block(vcpu);
2421 down_read(&vcpu->kvm->slots_lock);
2449 if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE) 2422 if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
2450 return -EINTR; 2423 return -EINTR;
2451 return 1; 2424 return 1;
@@ -2738,6 +2711,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2738 vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE; 2711 vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
2739 } 2712 }
2740 2713
2714 down_read(&vcpu->kvm->slots_lock);
2741 vapic_enter(vcpu); 2715 vapic_enter(vcpu);
2742 2716
2743preempted: 2717preempted:
@@ -2811,6 +2785,8 @@ again:
2811 2785
2812 kvm_lapic_sync_to_vapic(vcpu); 2786 kvm_lapic_sync_to_vapic(vcpu);
2813 2787
2788 up_read(&vcpu->kvm->slots_lock);
2789
2814 vcpu->guest_mode = 1; 2790 vcpu->guest_mode = 1;
2815 kvm_guest_enter(); 2791 kvm_guest_enter();
2816 2792
@@ -2837,6 +2813,8 @@ again:
2837 2813
2838 preempt_enable(); 2814 preempt_enable();
2839 2815
2816 down_read(&vcpu->kvm->slots_lock);
2817
2840 /* 2818 /*
2841 * Profile KVM exit RIPs: 2819 * Profile KVM exit RIPs:
2842 */ 2820 */
@@ -2864,14 +2842,18 @@ again:
2864 } 2842 }
2865 2843
2866out: 2844out:
2845 up_read(&vcpu->kvm->slots_lock);
2867 if (r > 0) { 2846 if (r > 0) {
2868 kvm_resched(vcpu); 2847 kvm_resched(vcpu);
2848 down_read(&vcpu->kvm->slots_lock);
2869 goto preempted; 2849 goto preempted;
2870 } 2850 }
2871 2851
2872 post_kvm_run_save(vcpu, kvm_run); 2852 post_kvm_run_save(vcpu, kvm_run);
2873 2853
2854 down_read(&vcpu->kvm->slots_lock);
2874 vapic_exit(vcpu); 2855 vapic_exit(vcpu);
2856 up_read(&vcpu->kvm->slots_lock);
2875 2857
2876 return r; 2858 return r;
2877} 2859}
@@ -2906,9 +2888,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2906 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); 2888 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
2907 vcpu->mmio_read_completed = 1; 2889 vcpu->mmio_read_completed = 1;
2908 vcpu->mmio_needed = 0; 2890 vcpu->mmio_needed = 0;
2891
2892 down_read(&vcpu->kvm->slots_lock);
2909 r = emulate_instruction(vcpu, kvm_run, 2893 r = emulate_instruction(vcpu, kvm_run,
2910 vcpu->arch.mmio_fault_cr2, 0, 2894 vcpu->arch.mmio_fault_cr2, 0,
2911 EMULTYPE_NO_DECODE); 2895 EMULTYPE_NO_DECODE);
2896 up_read(&vcpu->kvm->slots_lock);
2912 if (r == EMULATE_DO_MMIO) { 2897 if (r == EMULATE_DO_MMIO) {
2913 /* 2898 /*
2914 * Read-modify-write. Back to userspace. 2899 * Read-modify-write. Back to userspace.
@@ -3817,7 +3802,9 @@ fail:
3817void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) 3802void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
3818{ 3803{
3819 kvm_free_lapic(vcpu); 3804 kvm_free_lapic(vcpu);
3805 down_read(&vcpu->kvm->slots_lock);
3820 kvm_mmu_destroy(vcpu); 3806 kvm_mmu_destroy(vcpu);
3807 up_read(&vcpu->kvm->slots_lock);
3821 free_page((unsigned long)vcpu->arch.pio_data); 3808 free_page((unsigned long)vcpu->arch.pio_data);
3822} 3809}
3823 3810