 arch/x86/Kconfig           |   2
 arch/x86/kvm/lapic.c       |   4
 arch/x86/kvm/mmu.c         |  38
 arch/x86/kvm/paging_tmpl.h |  20
 arch/x86/kvm/svm.c         |  26
 arch/x86/kvm/vmx.c         |  14
 arch/x86/kvm/x86.c         | 114
 include/linux/kvm.h        |   4
 include/linux/kvm_host.h   |   1
 virt/kvm/ioapic.c          |   8
 virt/kvm/kvm_main.c        |   5
 11 files changed, 156 insertions(+), 80 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4a88cf7695b4..53800b80a204 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,7 +21,7 @@ config X86
         select HAVE_IDE
         select HAVE_OPROFILE
         select HAVE_KPROBES
-        select HAVE_KVM
+        select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 
 
 config GENERIC_LOCKBREAK
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 2cbee9479ce4..68a6b1511934 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -647,6 +647,10 @@ static void start_apic_timer(struct kvm_lapic *apic)
         apic->timer.period = apic_get_reg(apic, APIC_TMICT) *
                     APIC_BUS_CYCLE_NS * apic->timer.divide_count;
         atomic_set(&apic->timer.pending, 0);
+
+        if (!apic->timer.period)
+                return;
+
         hrtimer_start(&apic->timer.dev,
                       ktime_add_ns(now, apic->timer.period),
                       HRTIMER_MODE_ABS);
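
Note: per the Intel SDM, writing 0 to the timer initial-count register (TMICT) stops the local APIC timer, so a zero period means "disarmed" and there is nothing to hand to hrtimer_start(); without the guard, KVM would arm a zero-length hrtimer that fires immediately. A minimal guest-side illustration of the case the guard covers -- the MMIO helper and hard-coded base address are assumptions for this sketch, not code from the patch:

/* Guest-side sketch: disarming the local APIC timer. */
#define APIC_BASE  0xfee00000UL   /* default xAPIC MMIO base */
#define APIC_TMICT 0x380          /* timer initial-count register */

static inline void apic_write(unsigned long off, unsigned int val)
{
        *(volatile unsigned int *)(APIC_BASE + off) = val;
}

static void stop_apic_timer(void)
{
        apic_write(APIC_TMICT, 0); /* count of 0 == timer stopped (SDM) */
}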
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8efdcdbebb03..d8172aabc660 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -681,8 +681,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                              unsigned level,
                                              int metaphysical,
                                              unsigned access,
-                                             u64 *parent_pte,
-                                             bool *new_page)
+                                             u64 *parent_pte)
 {
         union kvm_mmu_page_role role;
         unsigned index;
@@ -722,8 +721,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         vcpu->arch.mmu.prefetch_page(vcpu, sp);
         if (!metaphysical)
                 rmap_write_protect(vcpu->kvm, gfn);
-        if (new_page)
-                *new_page = 1;
         return sp;
 }
 
@@ -876,11 +873,18 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
+        struct page *page;
+
         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
 
         if (gpa == UNMAPPED_GVA)
                 return NULL;
-        return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+
+        down_read(&current->mm->mmap_sem);
+        page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+        up_read(&current->mm->mmap_sem);
+
+        return page;
 }
 
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
@@ -999,8 +1003,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
                                 >> PAGE_SHIFT;
                         new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                      v, level - 1,
-                                                     1, ACC_ALL, &table[index],
-                                                     NULL);
+                                                     1, ACC_ALL, &table[index]);
                         if (!new_table) {
                                 pgprintk("nonpaging_map: ENOMEM\n");
                                 kvm_release_page_clean(page);
@@ -1020,15 +1023,18 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 
         struct page *page;
 
+        down_read(&vcpu->kvm->slots_lock);
+
         down_read(&current->mm->mmap_sem);
         page = gfn_to_page(vcpu->kvm, gfn);
+        up_read(&current->mm->mmap_sem);
 
         spin_lock(&vcpu->kvm->mmu_lock);
         kvm_mmu_free_some_pages(vcpu);
         r = __nonpaging_map(vcpu, v, write, gfn, page);
         spin_unlock(&vcpu->kvm->mmu_lock);
 
-        up_read(&current->mm->mmap_sem);
+        up_read(&vcpu->kvm->slots_lock);
 
         return r;
 }
@@ -1090,7 +1096,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
                 ASSERT(!VALID_PAGE(root));
                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                                      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
+                                      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
                 root = __pa(sp->spt);
                 ++sp->root_count;
                 vcpu->arch.mmu.root_hpa = root;
@@ -1111,7 +1117,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                         root_gfn = 0;
                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                       PT32_ROOT_LEVEL, !is_paging(vcpu),
-                                      ACC_ALL, NULL, NULL);
+                                      ACC_ALL, NULL);
                 root = __pa(sp->spt);
                 ++sp->root_count;
                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
@@ -1172,7 +1178,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-        pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
+        pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
         mmu_free_roots(vcpu);
 }
 
@@ -1362,6 +1368,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         gfn_t gfn;
         int r;
         u64 gpte = 0;
+        struct page *page;
 
         if (bytes != 4 && bytes != 8)
                 return;
@@ -1389,6 +1396,11 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         if (!is_present_pte(gpte))
                 return;
         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+
+        down_read(&current->mm->mmap_sem);
+        page = gfn_to_page(vcpu->kvm, gfn);
+        up_read(&current->mm->mmap_sem);
+
         vcpu->arch.update_pte.gfn = gfn;
         vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
 
@@ -1496,9 +1508,9 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
         gpa_t gpa;
         int r;
 
-        down_read(&current->mm->mmap_sem);
+        down_read(&vcpu->kvm->slots_lock);
         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
-        up_read(&current->mm->mmap_sem);
+        up_read(&vcpu->kvm->slots_lock);
 
         spin_lock(&vcpu->kvm->mmu_lock);
         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
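
Note: the pattern running through these mmu.c hunks is a lock split. The new kvm->slots_lock rwsem keeps the memslot array stable across gva_to_gpa() and gfn_to_page(); current->mm->mmap_sem is narrowed to just the gfn_to_page() call (which may fault pages in through get_user_pages() and can sleep); and mmap_sem is always dropped before the mmu_lock spinlock is taken. A condensed restatement of nonpaging_map() as it reads after this patch (in-tree names; compiles only in-tree):

static int nonpaging_map_sketch(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
        struct page *page;
        int r;

        down_read(&vcpu->kvm->slots_lock);      /* memslots stay put        */

        down_read(&current->mm->mmap_sem);      /* may sleep / fault pages  */
        page = gfn_to_page(vcpu->kvm, gfn);
        up_read(&current->mm->mmap_sem);        /* dropped before spinlock  */

        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
        r = __nonpaging_map(vcpu, v, write, gfn, page);
        spin_unlock(&vcpu->kvm->mmu_lock);

        up_read(&vcpu->kvm->slots_lock);
        return r;
}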
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 03ba8608fe0f..ecc0856268c4 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -91,7 +91,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
         pt_element_t *table;
         struct page *page;
 
+        down_read(&current->mm->mmap_sem);
         page = gfn_to_page(kvm, table_gfn);
+        up_read(&current->mm->mmap_sem);
+
         table = kmap_atomic(page, KM_USER0);
 
         ret = CMPXCHG(&table[index], orig_pte, new_pte);
@@ -140,7 +143,7 @@ walk:
         }
 #endif
         ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
-               (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
+               (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
 
         pt_access = ACC_ALL;
 
@@ -297,7 +300,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                 u64 shadow_pte;
                 int metaphysical;
                 gfn_t table_gfn;
-                bool new_page = 0;
 
                 shadow_ent = ((u64 *)__va(shadow_addr)) + index;
                 if (level == PT_PAGE_TABLE_LEVEL)
@@ -319,8 +321,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                 }
                 shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
                                                metaphysical, access,
-                                               shadow_ent, &new_page);
-                if (new_page && !metaphysical) {
+                                               shadow_ent);
+                if (!metaphysical) {
                         int r;
                         pt_element_t curr_pte;
                         r = kvm_read_guest_atomic(vcpu->kvm,
@@ -378,7 +380,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         if (r)
                 return r;
 
-        down_read(&current->mm->mmap_sem);
+        down_read(&vcpu->kvm->slots_lock);
         /*
          * Look up the shadow pte for the faulting address.
          */
@@ -392,11 +394,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
                 pgprintk("%s: guest page fault\n", __FUNCTION__);
                 inject_page_fault(vcpu, addr, walker.error_code);
                 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-                up_read(&current->mm->mmap_sem);
+                up_read(&vcpu->kvm->slots_lock);
                 return 0;
         }
 
+        down_read(&current->mm->mmap_sem);
         page = gfn_to_page(vcpu->kvm, walker.gfn);
+        up_read(&current->mm->mmap_sem);
 
         spin_lock(&vcpu->kvm->mmu_lock);
         kvm_mmu_free_some_pages(vcpu);
@@ -413,14 +417,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
          */
         if (shadow_pte && is_io_pte(*shadow_pte)) {
                 spin_unlock(&vcpu->kvm->mmu_lock);
-                up_read(&current->mm->mmap_sem);
+                up_read(&vcpu->kvm->slots_lock);
                 return 1;
         }
 
         ++vcpu->stat.pf_fixed;
         kvm_mmu_audit(vcpu, "post page fault (fixed)");
         spin_unlock(&vcpu->kvm->mmu_lock);
-        up_read(&current->mm->mmap_sem);
+        up_read(&vcpu->kvm->slots_lock);
 
         return write_pt;
 }
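
Note: with the new_page flag gone, the guest-PTE recheck in fetch() runs every time a shadow page is linked for a real (non-metaphysical) guest table, not only for freshly allocated ones; gfn_to_page() can sleep, so the guest may have rewritten the PTE in the meantime. The shape of that check, with the walker fields replaced by the hypothetical names gpte_gpa and cached_gpte, since the hunk above is cut off mid-statement:

if (!metaphysical) {
        pt_element_t curr_pte;
        int r;

        /* re-read the guest PTE and bail out if it changed under us */
        r = kvm_read_guest_atomic(vcpu->kvm, gpte_gpa,
                                  &curr_pte, sizeof(curr_pte));
        if (r || curr_pte != cached_gpte)
                return NULL;    /* let the fault be retried from scratch */
}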
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index de755cb1431d..1a582f1090e8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -792,6 +792,10 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         vcpu->arch.cr0 = cr0;
         cr0 |= X86_CR0_PG | X86_CR0_WP;
         cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+        if (!vcpu->fpu_active) {
+                svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+                cr0 |= X86_CR0_TS;
+        }
         svm->vmcb->save.cr0 = cr0;
 }
 
@@ -1096,6 +1100,24 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
         case MSR_IA32_SYSENTER_ESP:
                 *data = svm->vmcb->save.sysenter_esp;
                 break;
+        /* Nobody will change the following 5 values in the VMCB so
+           we can safely return them on rdmsr. They will always be 0
+           until LBRV is implemented. */
+        case MSR_IA32_DEBUGCTLMSR:
+                *data = svm->vmcb->save.dbgctl;
+                break;
+        case MSR_IA32_LASTBRANCHFROMIP:
+                *data = svm->vmcb->save.br_from;
+                break;
+        case MSR_IA32_LASTBRANCHTOIP:
+                *data = svm->vmcb->save.br_to;
+                break;
+        case MSR_IA32_LASTINTFROMIP:
+                *data = svm->vmcb->save.last_excp_from;
+                break;
+        case MSR_IA32_LASTINTTOIP:
+                *data = svm->vmcb->save.last_excp_to;
+                break;
         default:
                 return kvm_get_msr_common(vcpu, ecx, data);
         }
@@ -1156,6 +1178,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
         case MSR_IA32_SYSENTER_ESP:
                 svm->vmcb->save.sysenter_esp = data;
                 break;
+        case MSR_IA32_DEBUGCTLMSR:
+                pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
+                                __FUNCTION__, data);
+                break;
         case MSR_K7_EVNTSEL0:
         case MSR_K7_EVNTSEL1:
         case MSR_K7_EVNTSEL2:
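
Note: the svm_set_cr0() hunk keeps lazy FPU switching intact: while fpu_active is clear, the guest-visible CR0 has TS forced on and #NM is intercepted, so the guest's first FPU instruction exits to KVM instead of running on stale FPU state. A rough sketch of the other half of that handshake, the #NM intercept handler -- a sketch of the pattern, not this patch's code:

static int nm_interception_sketch(struct vcpu_svm *svm)
{
        /* stop intercepting #NM and let FPU instructions run again */
        svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
        svm->vmcb->save.cr0 &= ~X86_CR0_TS;
        svm->vcpu.fpu_active = 1;  /* guest FPU state loaded on next entry */
        return 1;
}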
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ad36447e696e..94ea724638fd 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -638,6 +638,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 {
         int save_nmsrs;
 
+        vmx_load_host_state(vmx);
         save_nmsrs = 0;
 #ifdef CONFIG_X86_64
         if (is_long_mode(&vmx->vcpu)) {
@@ -1477,7 +1478,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
         struct kvm_userspace_memory_region kvm_userspace_mem;
         int r = 0;
 
-        down_write(&current->mm->mmap_sem);
+        down_write(&kvm->slots_lock);
         if (kvm->arch.apic_access_page)
                 goto out;
         kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
@@ -1487,9 +1488,12 @@ static int alloc_apic_access_page(struct kvm *kvm)
         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
         if (r)
                 goto out;
+
+        down_read(&current->mm->mmap_sem);
         kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
+        up_read(&current->mm->mmap_sem);
 out:
-        up_write(&current->mm->mmap_sem);
+        up_write(&kvm->slots_lock);
         return r;
 }
 
@@ -1602,9 +1606,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
         vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
         vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
 
-        if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
-                if (alloc_apic_access_page(vmx->vcpu.kvm) != 0)
-                        return -ENOMEM;
 
         return 0;
 }
@@ -2534,6 +2535,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
         put_cpu();
         if (err)
                 goto free_vmcs;
+        if (vm_need_virtualize_apic_accesses(kvm))
+                if (alloc_apic_access_page(kvm) != 0)
+                        goto free_vmcs;
 
         return &vmx->vcpu;
 
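
Note: alloc_apic_access_page() installs a private memslot covering the xAPIC MMIO window and caches its backing page; it moves from vmx_vcpu_setup() to vCPU creation and now takes slots_lock for write because __kvm_set_memory_region() expects that lock instead of mmap_sem. The magic gfn is just the APIC base shifted by the page size; a trivially runnable check of the arithmetic:

#include <assert.h>

#define APIC_DEFAULT_PHYS_BASE 0xfee00000UL
#define PAGE_SHIFT 12

int main(void)
{
        /* gfn_to_page(kvm, 0xfee00) targets GPA 0xfee00000 */
        assert((APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT) == 0xfee00UL);
        return 0;
}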
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cf5308148689..6b01552bd1f1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -46,6 +46,9 @@
 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
 
+static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
+                                             struct kvm_cpuid_entry2 __user *entries);
+
 struct kvm_x86_ops *kvm_x86_ops;
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -181,7 +184,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
         int ret;
         u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 
-        down_read(&current->mm->mmap_sem);
+        down_read(&vcpu->kvm->slots_lock);
         ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
                                   offset * sizeof(u64), sizeof(pdpte));
         if (ret < 0) {
@@ -198,7 +201,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 
         memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
 out:
-        up_read(&current->mm->mmap_sem);
+        up_read(&vcpu->kvm->slots_lock);
 
         return ret;
 }
@@ -212,13 +215,13 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
         if (is_long_mode(vcpu) || !is_pae(vcpu))
                 return false;
 
-        down_read(&current->mm->mmap_sem);
+        down_read(&vcpu->kvm->slots_lock);
         r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
         if (r < 0)
                 goto out;
         changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
 out:
-        up_read(&current->mm->mmap_sem);
+        up_read(&vcpu->kvm->slots_lock);
 
         return changed;
 }
@@ -356,7 +359,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                  */
         }
 
-        down_read(&current->mm->mmap_sem);
+        down_read(&vcpu->kvm->slots_lock);
         /*
          * Does the new cr3 value map to physical memory? (Note, we
          * catch an invalid cr3 even in real-mode, because it would
@@ -372,7 +375,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                 vcpu->arch.cr3 = cr3;
                 vcpu->arch.mmu.new_cr3(vcpu);
         }
-        up_read(&current->mm->mmap_sem);
+        up_read(&vcpu->kvm->slots_lock);
 }
 EXPORT_SYMBOL_GPL(set_cr3);
 
@@ -484,6 +487,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                 pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
                         __FUNCTION__, data);
                 break;
+        case MSR_IA32_MCG_CTL:
+                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
+                        __FUNCTION__, data);
+                break;
         case MSR_IA32_UCODE_REV:
         case MSR_IA32_UCODE_WRITE:
         case 0x200 ... 0x2ff: /* MTRRs */
@@ -526,6 +533,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
         case MSR_IA32_MC0_CTL:
         case MSR_IA32_MCG_STATUS:
         case MSR_IA32_MCG_CAP:
+        case MSR_IA32_MCG_CTL:
         case MSR_IA32_MC0_MISC:
         case MSR_IA32_MC0_MISC+4:
         case MSR_IA32_MC0_MISC+8:
@@ -727,6 +735,24 @@ long kvm_arch_dev_ioctl(struct file *filp,
                 r = 0;
                 break;
         }
+        case KVM_GET_SUPPORTED_CPUID: {
+                struct kvm_cpuid2 __user *cpuid_arg = argp;
+                struct kvm_cpuid2 cpuid;
+
+                r = -EFAULT;
+                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+                        goto out;
+                r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
+                                                      cpuid_arg->entries);
+                if (r)
+                        goto out;
+
+                r = -EFAULT;
+                if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
+                        goto out;
+                r = 0;
+                break;
+        }
         default:
                 r = -EINVAL;
         }
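
Note: KVM_GET_SUPPORTED_CPUID moves from the VM fd to the /dev/kvm system fd (and gets the new ioctl number 0x05, see the kvm.h hunk below), so userspace can ask which CPUID features the host can virtualize before it creates any VM. A hypothetical userspace probe -- the entry count of 64 is an arbitrary guess; a production caller would grow the buffer and retry on E2BIG:

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/kvm", O_RDWR);
        struct kvm_cpuid2 *cpuid;
        unsigned nent = 64;             /* guess; retry bigger on E2BIG */

        if (fd < 0) {
                perror("/dev/kvm");
                return 1;
        }
        cpuid = calloc(1, sizeof(*cpuid) + nent * sizeof(cpuid->entries[0]));
        cpuid->nent = nent;
        if (ioctl(fd, KVM_GET_SUPPORTED_CPUID, cpuid) < 0) {
                perror("KVM_GET_SUPPORTED_CPUID");
                return 1;
        }
        for (unsigned i = 0; i < cpuid->nent; i++)
                printf("fn %#x idx %#x: eax=%#x ebx=%#x ecx=%#x edx=%#x\n",
                       cpuid->entries[i].function, cpuid->entries[i].index,
                       cpuid->entries[i].eax, cpuid->entries[i].ebx,
                       cpuid->entries[i].ecx, cpuid->entries[i].edx);
        free(cpuid);
        close(fd);
        return 0;
}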
@@ -974,8 +1000,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
         put_cpu();
 }
 
-static int kvm_vm_ioctl_get_supported_cpuid(struct kvm *kvm,
-                                    struct kvm_cpuid2 *cpuid,
+static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                     struct kvm_cpuid_entry2 __user *entries)
 {
         struct kvm_cpuid_entry2 *cpuid_entries;
@@ -1207,12 +1232,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
                 return -EINVAL;
 
-        down_write(&current->mm->mmap_sem);
+        down_write(&kvm->slots_lock);
 
         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
-        up_write(&current->mm->mmap_sem);
+        up_write(&kvm->slots_lock);
         return 0;
 }
 
@@ -1261,7 +1286,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
             < alias->target_phys_addr)
                 goto out;
 
-        down_write(&current->mm->mmap_sem);
+        down_write(&kvm->slots_lock);
 
         p = &kvm->arch.aliases[alias->slot];
         p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -1275,7 +1300,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
 
         kvm_mmu_zap_all(kvm);
 
-        up_write(&current->mm->mmap_sem);
+        up_write(&kvm->slots_lock);
 
         return 0;
 
@@ -1351,7 +1376,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         struct kvm_memory_slot *memslot;
         int is_dirty = 0;
 
-        down_write(&current->mm->mmap_sem);
+        down_write(&kvm->slots_lock);
 
         r = kvm_get_dirty_log(kvm, log, &is_dirty);
         if (r)
@@ -1367,7 +1392,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
         }
         r = 0;
 out:
-        up_write(&current->mm->mmap_sem);
+        up_write(&kvm->slots_lock);
         return r;
 }
 
@@ -1487,24 +1512,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
                 r = 0;
                 break;
         }
-        case KVM_GET_SUPPORTED_CPUID: {
-                struct kvm_cpuid2 __user *cpuid_arg = argp;
-                struct kvm_cpuid2 cpuid;
-
-                r = -EFAULT;
-                if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
-                        goto out;
-                r = kvm_vm_ioctl_get_supported_cpuid(kvm, &cpuid,
-                        cpuid_arg->entries);
-                if (r)
-                        goto out;
-
-                r = -EFAULT;
-                if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
-                        goto out;
-                r = 0;
-                break;
-        }
         default:
                 ;
         }
@@ -1563,7 +1570,7 @@ int emulator_read_std(unsigned long addr,
         void *data = val;
         int r = X86EMUL_CONTINUE;
 
-        down_read(&current->mm->mmap_sem);
+        down_read(&vcpu->kvm->slots_lock);
         while (bytes) {
                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
                 unsigned offset = addr & (PAGE_SIZE-1);
@@ -1585,7 +1592,7 @@ int emulator_read_std(unsigned long addr,
                 addr += tocopy;
         }
 out:
-        up_read(&current->mm->mmap_sem);
+        up_read(&vcpu->kvm->slots_lock);
         return r;
 }
 EXPORT_SYMBOL_GPL(emulator_read_std);
@@ -1604,9 +1611,9 @@ static int emulator_read_emulated(unsigned long addr,
                 return X86EMUL_CONTINUE;
         }
 
-        down_read(&current->mm->mmap_sem);
+        down_read(&vcpu->kvm->slots_lock);
         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
-        up_read(&current->mm->mmap_sem);
+        up_read(&vcpu->kvm->slots_lock);
 
         /* For APIC access vmexit */
         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1644,14 +1651,14 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
         int ret;
 
-        down_read(&current->mm->mmap_sem);
+        down_read(&vcpu->kvm->slots_lock);
         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
         if (ret < 0) {
-                up_read(&current->mm->mmap_sem);
+                up_read(&vcpu->kvm->slots_lock);
                 return 0;
         }
         kvm_mmu_pte_write(vcpu, gpa, val, bytes);
-        up_read(&current->mm->mmap_sem);
+        up_read(&vcpu->kvm->slots_lock);
         return 1;
 }
 
@@ -1663,9 +1670,9 @@ static int emulator_write_emulated_onepage(unsigned long addr,
         struct kvm_io_device *mmio_dev;
         gpa_t gpa;
 
-        down_read(&current->mm->mmap_sem);
+        down_read(&vcpu->kvm->slots_lock);
         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
-        up_read(&current->mm->mmap_sem);
+        up_read(&vcpu->kvm->slots_lock);
 
         if (gpa == UNMAPPED_GVA) {
                 kvm_inject_page_fault(vcpu, addr, 2);
@@ -1742,7 +1749,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
                 char *kaddr;
                 u64 val;
 
-                down_read(&current->mm->mmap_sem);
+                down_read(&vcpu->kvm->slots_lock);
                 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 
                 if (gpa == UNMAPPED_GVA ||
@@ -1753,13 +1760,17 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
                         goto emul_write;
 
                 val = *(u64 *)new;
+
+                down_read(&current->mm->mmap_sem);
                 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+                up_read(&current->mm->mmap_sem);
+
                 kaddr = kmap_atomic(page, KM_USER0);
                 set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
                 kunmap_atomic(kaddr, KM_USER0);
                 kvm_release_page_dirty(page);
 emul_write:
-                up_read(&current->mm->mmap_sem);
+                up_read(&vcpu->kvm->slots_lock);
         }
 #endif
 
@@ -2152,10 +2163,10 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
         kvm_x86_ops->skip_emulated_instruction(vcpu);
 
         for (i = 0; i < nr_pages; ++i) {
-                down_read(&current->mm->mmap_sem);
+                down_read(&vcpu->kvm->slots_lock);
                 page = gva_to_page(vcpu, address + i * PAGE_SIZE);
                 vcpu->arch.pio.guest_pages[i] = page;
-                up_read(&current->mm->mmap_sem);
+                up_read(&vcpu->kvm->slots_lock);
                 if (!page) {
                         kvm_inject_gp(vcpu, 0);
                         free_pio_guest_pages(vcpu);
@@ -2478,8 +2489,9 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
 
         down_read(&current->mm->mmap_sem);
         page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
-        vcpu->arch.apic->vapic_page = page;
         up_read(&current->mm->mmap_sem);
+
+        vcpu->arch.apic->vapic_page = page;
 }
 
 static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -2861,8 +2873,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
         kvm_x86_ops->decache_cr4_guest_bits(vcpu);
 
         mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
-        vcpu->arch.cr0 = sregs->cr0;
         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
+        vcpu->arch.cr0 = sregs->cr0;
 
         mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
@@ -2952,9 +2964,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
         gpa_t gpa;
 
         vcpu_load(vcpu);
-        down_read(&current->mm->mmap_sem);
+        down_read(&vcpu->kvm->slots_lock);
         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
-        up_read(&current->mm->mmap_sem);
+        up_read(&vcpu->kvm->slots_lock);
         tr->physical_address = gpa;
         tr->valid = gpa != UNMAPPED_GVA;
         tr->writeable = 1;
@@ -3227,11 +3239,13 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
          */
         if (!user_alloc) {
                 if (npages && !old.rmap) {
+                        down_write(&current->mm->mmap_sem);
                         memslot->userspace_addr = do_mmap(NULL, 0,
                                                      npages * PAGE_SIZE,
                                                      PROT_READ | PROT_WRITE,
                                                      MAP_SHARED | MAP_ANONYMOUS,
                                                      0);
+                        up_write(&current->mm->mmap_sem);
 
                         if (IS_ERR((void *)memslot->userspace_addr))
                                 return PTR_ERR((void *)memslot->userspace_addr);
@@ -3239,8 +3253,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                 if (!old.user_alloc && old.rmap) {
                         int ret;
 
+                        down_write(&current->mm->mmap_sem);
                         ret = do_munmap(current->mm, old.userspace_addr,
                                         old.npages * PAGE_SIZE);
+                        up_write(&current->mm->mmap_sem);
                         if (ret < 0)
                                 printk(KERN_WARNING
                                        "kvm_vm_ioctl_set_memory_region: "
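
Note: do_mmap() and do_munmap() modify the caller's address space and require mmap_sem held for write; the old code inherited that from the caller, which reused mmap_sem as the memslot lock. With slots_lock now carrying the memslot role, the address-space lock must be taken explicitly, giving a consistent order: slots_lock (outer, held by kvm_set_memory_region()) before mmap_sem (inner). A documentation stub of that ordering -- not patch code:

static void slots_lock_ordering_sketch(struct kvm *kvm)
{
        down_write(&kvm->slots_lock);          /* outer: memslot update      */
        down_write(&current->mm->mmap_sem);    /* inner: do_mmap/do_munmap   */
        up_write(&current->mm->mmap_sem);
        up_write(&kvm->slots_lock);            /* taking these in the other
                                                  order anywhere would
                                                  deadlock */
}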
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 4de4fd2d8607..c1ec04fd000d 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -221,6 +221,7 @@ struct kvm_vapic_addr {
  * Get size for mmap(vcpu_fd)
  */
 #define KVM_GET_VCPU_MMAP_SIZE    _IO(KVMIO,   0x04) /* in bytes */
+#define KVM_GET_SUPPORTED_CPUID   _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
 
 /*
  * Extension capability list.
@@ -230,8 +231,8 @@ struct kvm_vapic_addr {
 #define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2
 #define KVM_CAP_USER_MEMORY 3
 #define KVM_CAP_SET_TSS_ADDR 4
-#define KVM_CAP_EXT_CPUID 5
 #define KVM_CAP_VAPIC 6
+#define KVM_CAP_EXT_CPUID 7
 
 /*
  * ioctls for VM fds
@@ -249,7 +250,6 @@ struct kvm_vapic_addr {
 #define KVM_CREATE_VCPU           _IO(KVMIO,  0x41)
 #define KVM_GET_DIRTY_LOG         _IOW(KVMIO, 0x42, struct kvm_dirty_log)
 #define KVM_SET_MEMORY_ALIAS      _IOW(KVMIO, 0x43, struct kvm_memory_alias)
-#define KVM_GET_SUPPORTED_CPUID   _IOWR(KVMIO, 0x48, struct kvm_cpuid2)
 /* Device model IOC */
 #define KVM_CREATE_IRQCHIP        _IO(KVMIO,  0x60)
 #define KVM_IRQ_LINE              _IOW(KVMIO, 0x61, struct kvm_irq_level)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ea4764b0a2f4..928b0d59e9ba 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -107,6 +107,7 @@ struct kvm_memory_slot {
 struct kvm {
         struct mutex lock; /* protects the vcpus array and APIC accesses */
         spinlock_t mmu_lock;
+        struct rw_semaphore slots_lock;
         struct mm_struct *mm; /* userspace tied to this vm */
         int nmemslots;
         struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
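
Note: what the new field guards, spelled out for exposition (the comments below are added here; the patch adds only the bare field):

struct kvm {
        struct mutex lock;              /* vcpus array + APIC accesses    */
        spinlock_t mmu_lock;            /* shadow MMU state               */
        struct rw_semaphore slots_lock; /* memslots[] and nmemslots:      */
                                        /*  read  = translate gfn -> slot */
                                        /*  write = add/move/remove slots */
        struct mm_struct *mm;
        int nmemslots;
        /* ... */
};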
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 317f8e211cd2..4232fd75dd20 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -211,6 +211,10 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
         case IOAPIC_LOWEST_PRIORITY:
                 vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
                                 deliver_bitmask);
+#ifdef CONFIG_X86
+                if (irq == 0)
+                        vcpu = ioapic->kvm->vcpus[0];
+#endif
                 if (vcpu != NULL)
                         ioapic_inj_irq(ioapic, vcpu, vector,
                                        trig_mode, delivery_mode);
@@ -220,6 +224,10 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
                                 deliver_bitmask, vector, IOAPIC_LOWEST_PRIORITY);
                 break;
         case IOAPIC_FIXED:
+#ifdef CONFIG_X86
+                if (irq == 0)
+                        deliver_bitmask = 1;
+#endif
                 for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
                         if (!(deliver_bitmask & (1 << vcpu_id)))
                                 continue;
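
Note: deliver_bitmask is a per-vCPU bitmap (bit n selects kvm->vcpus[n]), so forcing it to 1 for irq 0 pins the PIT line to the boot CPU, matching the PC convention that the timer interrupt is handled by the BSP. A sketch of how the delivery loop consumes the mask (the injection call is elided in the hunk above):

unsigned long deliver_bitmask = 1;      /* bit 0 -> vcpus[0], the BSP */
int vcpu_id;

for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
        if (!(deliver_bitmask & (1 << vcpu_id)))
                continue;
        deliver_bitmask &= ~(1 << vcpu_id);
        /* ioapic_inj_irq(ioapic, kvm->vcpus[vcpu_id], ...) */
}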
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 32fbf8006969..b2e12893e3f4 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -169,6 +169,7 @@ static struct kvm *kvm_create_vm(void)
         kvm_io_bus_init(&kvm->pio_bus);
         mutex_init(&kvm->lock);
         kvm_io_bus_init(&kvm->mmio_bus);
+        init_rwsem(&kvm->slots_lock);
         spin_lock(&kvm_lock);
         list_add(&kvm->vm_list, &vm_list);
         spin_unlock(&kvm_lock);
@@ -339,9 +340,9 @@ int kvm_set_memory_region(struct kvm *kvm,
 {
         int r;
 
-        down_write(&current->mm->mmap_sem);
+        down_write(&kvm->slots_lock);
         r = __kvm_set_memory_region(kvm, mem, user_alloc);
-        up_write(&current->mm->mmap_sem);
+        up_write(&kvm->slots_lock);
         return r;
 }
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
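
Note: this leaves two entry points: kvm_set_memory_region() for ordinary callers, which takes slots_lock for write itself, and __kvm_set_memory_region() for callers that already hold it, as vmx.c's alloc_apic_access_page() does above. Illustrative call sites (sketch):

/* ordinary caller: the wrapper handles locking */
r = kvm_set_memory_region(kvm, mem, user_alloc);

/* caller already under down_write(&kvm->slots_lock): */
r = __kvm_set_memory_region(kvm, mem, user_alloc);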
