diff options
| -rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu.c | 1 | ||||
| -rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu_hv.c | 90 | ||||
| -rw-r--r-- | arch/powerpc/kvm/book3s_pr.c | 2 | ||||
| -rw-r--r-- | arch/x86/kvm/mmu.c | 19 | ||||
| -rw-r--r-- | arch/x86/kvm/svm.c | 9 | ||||
| -rw-r--r-- | arch/x86/kvm/vmx.c | 17 |
6 files changed, 88 insertions, 50 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 29ebe2fd5867..a93d719edc90 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c | |||
| @@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
| 235 | gpte->may_read = true; | 235 | gpte->may_read = true; |
| 236 | gpte->may_write = true; | 236 | gpte->may_write = true; |
| 237 | gpte->page_size = MMU_PAGE_4K; | 237 | gpte->page_size = MMU_PAGE_4K; |
| 238 | gpte->wimg = HPTE_R_M; | ||
| 238 | 239 | ||
| 239 | return 0; | 240 | return 0; |
| 240 | } | 241 | } |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 966097232d21..b73dbc9e797d 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
| @@ -65,11 +65,17 @@ struct kvm_resize_hpt { | |||
| 65 | u32 order; | 65 | u32 order; |
| 66 | 66 | ||
| 67 | /* These fields protected by kvm->lock */ | 67 | /* These fields protected by kvm->lock */ |
| 68 | |||
| 69 | /* Possible values and their usage: | ||
| 70 | * <0 an error occurred during allocation, | ||
| 71 | * -EBUSY allocation is in progress, ||
| 72 | * 0 allocation made successfully. ||
| 73 | */ | ||
| 68 | int error; | 74 | int error; |
| 69 | bool prepare_done; | ||
| 70 | 75 | ||
| 71 | /* Private to the work thread, until prepare_done is true, | 76 | /* Private to the work thread, until error != -EBUSY, |
| 72 | * then protected by kvm->resize_hpt_sem */ | 77 | * then protected by kvm->lock. |
| 78 | */ | ||
| 73 | struct kvm_hpt_info hpt; | 79 | struct kvm_hpt_info hpt; |
| 74 | }; | 80 | }; |
| 75 | 81 | ||
| @@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) | |||
| 159 | * Reset all the reverse-mapping chains for all memslots | 165 | * Reset all the reverse-mapping chains for all memslots |
| 160 | */ | 166 | */ |
| 161 | kvmppc_rmap_reset(kvm); | 167 | kvmppc_rmap_reset(kvm); |
| 162 | /* Ensure that each vcpu will flush its TLB on next entry. */ | ||
| 163 | cpumask_setall(&kvm->arch.need_tlb_flush); | ||
| 164 | err = 0; | 168 | err = 0; |
| 165 | goto out; | 169 | goto out; |
| 166 | } | 170 | } |
| @@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) | |||
| 176 | kvmppc_set_hpt(kvm, &info); | 180 | kvmppc_set_hpt(kvm, &info); |
| 177 | 181 | ||
| 178 | out: | 182 | out: |
| 183 | if (err == 0) | ||
| 184 | /* Ensure that each vcpu will flush its TLB on next entry. */ | ||
| 185 | cpumask_setall(&kvm->arch.need_tlb_flush); | ||
| 186 | |||
| 179 | mutex_unlock(&kvm->lock); | 187 | mutex_unlock(&kvm->lock); |
| 180 | return err; | 188 | return err; |
| 181 | } | 189 | } |
| @@ -1413,16 +1421,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize) | |||
| 1413 | 1421 | ||
| 1414 | static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) | 1422 | static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) |
| 1415 | { | 1423 | { |
| 1416 | BUG_ON(kvm->arch.resize_hpt != resize); | 1424 | if (WARN_ON(!mutex_is_locked(&kvm->lock))) |
| 1425 | return; | ||
| 1417 | 1426 | ||
| 1418 | if (!resize) | 1427 | if (!resize) |
| 1419 | return; | 1428 | return; |
| 1420 | 1429 | ||
| 1421 | if (resize->hpt.virt) | 1430 | if (resize->error != -EBUSY) { |
| 1422 | kvmppc_free_hpt(&resize->hpt); | 1431 | if (resize->hpt.virt) |
| 1432 | kvmppc_free_hpt(&resize->hpt); | ||
| 1433 | kfree(resize); | ||
| 1434 | } | ||
| 1423 | 1435 | ||
| 1424 | kvm->arch.resize_hpt = NULL; | 1436 | if (kvm->arch.resize_hpt == resize) |
| 1425 | kfree(resize); | 1437 | kvm->arch.resize_hpt = NULL; |
| 1426 | } | 1438 | } |
| 1427 | 1439 | ||
| 1428 | static void resize_hpt_prepare_work(struct work_struct *work) | 1440 | static void resize_hpt_prepare_work(struct work_struct *work) |
| @@ -1431,17 +1443,41 @@ static void resize_hpt_prepare_work(struct work_struct *work) | |||
| 1431 | struct kvm_resize_hpt, | 1443 | struct kvm_resize_hpt, |
| 1432 | work); | 1444 | work); |
| 1433 | struct kvm *kvm = resize->kvm; | 1445 | struct kvm *kvm = resize->kvm; |
| 1434 | int err; | 1446 | int err = 0; |
| 1435 | 1447 | ||
| 1436 | resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", | 1448 | if (WARN_ON(resize->error != -EBUSY)) |
| 1437 | resize->order); | 1449 | return; |
| 1438 | |||
| 1439 | err = resize_hpt_allocate(resize); | ||
| 1440 | 1450 | ||
| 1441 | mutex_lock(&kvm->lock); | 1451 | mutex_lock(&kvm->lock); |
| 1442 | 1452 | ||
| 1453 | /* Request is still current? */ | ||
| 1454 | if (kvm->arch.resize_hpt == resize) { | ||
| 1455 | /* We may request large allocations here: | ||
| 1456 | * do not sleep with kvm->lock held for a while. | ||
| 1457 | */ | ||
| 1458 | mutex_unlock(&kvm->lock); | ||
| 1459 | |||
| 1460 | resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n", | ||
| 1461 | resize->order); | ||
| 1462 | |||
| 1463 | err = resize_hpt_allocate(resize); | ||
| 1464 | |||
| 1465 | /* We have strict assumption about -EBUSY | ||
| 1466 | * when preparing for HPT resize. | ||
| 1467 | */ | ||
| 1468 | if (WARN_ON(err == -EBUSY)) | ||
| 1469 | err = -EINPROGRESS; | ||
| 1470 | |||
| 1471 | mutex_lock(&kvm->lock); | ||
| 1472 | /* It is possible that kvm->arch.resize_hpt != resize | ||
| 1473 | * after we grab kvm->lock again. | ||
| 1474 | */ | ||
| 1475 | } | ||
| 1476 | |||
| 1443 | resize->error = err; | 1477 | resize->error = err; |
| 1444 | resize->prepare_done = true; | 1478 | |
| 1479 | if (kvm->arch.resize_hpt != resize) | ||
| 1480 | resize_hpt_release(kvm, resize); | ||
| 1445 | 1481 | ||
| 1446 | mutex_unlock(&kvm->lock); | 1482 | mutex_unlock(&kvm->lock); |
| 1447 | } | 1483 | } |
| @@ -1466,14 +1502,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, | |||
| 1466 | 1502 | ||
| 1467 | if (resize) { | 1503 | if (resize) { |
| 1468 | if (resize->order == shift) { | 1504 | if (resize->order == shift) { |
| 1469 | /* Suitable resize in progress */ | 1505 | /* Suitable resize in progress? */ |
| 1470 | if (resize->prepare_done) { | 1506 | ret = resize->error; |
| 1471 | ret = resize->error; | 1507 | if (ret == -EBUSY) |
| 1472 | if (ret != 0) | ||
| 1473 | resize_hpt_release(kvm, resize); | ||
| 1474 | } else { | ||
| 1475 | ret = 100; /* estimated time in ms */ | 1508 | ret = 100; /* estimated time in ms */ |
| 1476 | } | 1509 | else if (ret) |
| 1510 | resize_hpt_release(kvm, resize); | ||
| 1477 | 1511 | ||
| 1478 | goto out; | 1512 | goto out; |
| 1479 | } | 1513 | } |
| @@ -1493,6 +1527,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, | |||
| 1493 | ret = -ENOMEM; | 1527 | ret = -ENOMEM; |
| 1494 | goto out; | 1528 | goto out; |
| 1495 | } | 1529 | } |
| 1530 | |||
| 1531 | resize->error = -EBUSY; | ||
| 1496 | resize->order = shift; | 1532 | resize->order = shift; |
| 1497 | resize->kvm = kvm; | 1533 | resize->kvm = kvm; |
| 1498 | INIT_WORK(&resize->work, resize_hpt_prepare_work); | 1534 | INIT_WORK(&resize->work, resize_hpt_prepare_work); |
| @@ -1547,16 +1583,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, | |||
| 1547 | if (!resize || (resize->order != shift)) | 1583 | if (!resize || (resize->order != shift)) |
| 1548 | goto out; | 1584 | goto out; |
| 1549 | 1585 | ||
| 1550 | ret = -EBUSY; | ||
| 1551 | if (!resize->prepare_done) | ||
| 1552 | goto out; | ||
| 1553 | |||
| 1554 | ret = resize->error; | 1586 | ret = resize->error; |
| 1555 | if (ret != 0) | 1587 | if (ret) |
| 1556 | goto out; | 1588 | goto out; |
| 1557 | 1589 | ||
| 1558 | ret = resize_hpt_rehash(resize); | 1590 | ret = resize_hpt_rehash(resize); |
| 1559 | if (ret != 0) | 1591 | if (ret) |
| 1560 | goto out; | 1592 | goto out; |
| 1561 | 1593 | ||
| 1562 | resize_hpt_pivot(resize); | 1594 | resize_hpt_pivot(resize); |
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index d0dc8624198f..7deaeeb14b93 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
| @@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac); | |||
| 60 | #define MSR_USER32 MSR_USER | 60 | #define MSR_USER32 MSR_USER |
| 61 | #define MSR_USER64 MSR_USER | 61 | #define MSR_USER64 MSR_USER |
| 62 | #define HW_PAGE_SIZE PAGE_SIZE | 62 | #define HW_PAGE_SIZE PAGE_SIZE |
| 63 | #define HPTE_R_M _PAGE_COHERENT | ||
| 63 | #endif | 64 | #endif |
| 64 | 65 | ||
| 65 | static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) | 66 | static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) |
| @@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
| 557 | pte.eaddr = eaddr; | 558 | pte.eaddr = eaddr; |
| 558 | pte.vpage = eaddr >> 12; | 559 | pte.vpage = eaddr >> 12; |
| 559 | pte.page_size = MMU_PAGE_64K; | 560 | pte.page_size = MMU_PAGE_64K; |
| 561 | pte.wimg = HPTE_R_M; | ||
| 560 | } | 562 | } |
| 561 | 563 | ||
| 562 | switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { | 564 | switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) { |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index c4deb1f34faa..2b8eb4da4d08 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -3781,7 +3781,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) | |||
| 3781 | bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) | 3781 | bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) |
| 3782 | { | 3782 | { |
| 3783 | if (unlikely(!lapic_in_kernel(vcpu) || | 3783 | if (unlikely(!lapic_in_kernel(vcpu) || |
| 3784 | kvm_event_needs_reinjection(vcpu))) | 3784 | kvm_event_needs_reinjection(vcpu) || |
| 3785 | vcpu->arch.exception.pending)) | ||
| 3785 | return false; | 3786 | return false; |
| 3786 | 3787 | ||
| 3787 | if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) | 3788 | if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) |
| @@ -5465,30 +5466,34 @@ static void mmu_destroy_caches(void) | |||
| 5465 | 5466 | ||
| 5466 | int kvm_mmu_module_init(void) | 5467 | int kvm_mmu_module_init(void) |
| 5467 | { | 5468 | { |
| 5469 | int ret = -ENOMEM; | ||
| 5470 | |||
| 5468 | kvm_mmu_clear_all_pte_masks(); | 5471 | kvm_mmu_clear_all_pte_masks(); |
| 5469 | 5472 | ||
| 5470 | pte_list_desc_cache = kmem_cache_create("pte_list_desc", | 5473 | pte_list_desc_cache = kmem_cache_create("pte_list_desc", |
| 5471 | sizeof(struct pte_list_desc), | 5474 | sizeof(struct pte_list_desc), |
| 5472 | 0, SLAB_ACCOUNT, NULL); | 5475 | 0, SLAB_ACCOUNT, NULL); |
| 5473 | if (!pte_list_desc_cache) | 5476 | if (!pte_list_desc_cache) |
| 5474 | goto nomem; | 5477 | goto out; |
| 5475 | 5478 | ||
| 5476 | mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", | 5479 | mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", |
| 5477 | sizeof(struct kvm_mmu_page), | 5480 | sizeof(struct kvm_mmu_page), |
| 5478 | 0, SLAB_ACCOUNT, NULL); | 5481 | 0, SLAB_ACCOUNT, NULL); |
| 5479 | if (!mmu_page_header_cache) | 5482 | if (!mmu_page_header_cache) |
| 5480 | goto nomem; | 5483 | goto out; |
| 5481 | 5484 | ||
| 5482 | if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) | 5485 | if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL)) |
| 5483 | goto nomem; | 5486 | goto out; |
| 5484 | 5487 | ||
| 5485 | register_shrinker(&mmu_shrinker); | 5488 | ret = register_shrinker(&mmu_shrinker); |
| 5489 | if (ret) | ||
| 5490 | goto out; | ||
| 5486 | 5491 | ||
| 5487 | return 0; | 5492 | return 0; |
| 5488 | 5493 | ||
| 5489 | nomem: | 5494 | out: |
| 5490 | mmu_destroy_caches(); | 5495 | mmu_destroy_caches(); |
| 5491 | return -ENOMEM; | 5496 | return ret; |
| 5492 | } | 5497 | } |
| 5493 | 5498 | ||
| 5494 | /* | 5499 | /* |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index bb31c801f1fc..3158dac87f82 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -361,7 +361,6 @@ static void recalc_intercepts(struct vcpu_svm *svm) | |||
| 361 | { | 361 | { |
| 362 | struct vmcb_control_area *c, *h; | 362 | struct vmcb_control_area *c, *h; |
| 363 | struct nested_state *g; | 363 | struct nested_state *g; |
| 364 | u32 h_intercept_exceptions; | ||
| 365 | 364 | ||
| 366 | mark_dirty(svm->vmcb, VMCB_INTERCEPTS); | 365 | mark_dirty(svm->vmcb, VMCB_INTERCEPTS); |
| 367 | 366 | ||
| @@ -372,14 +371,9 @@ static void recalc_intercepts(struct vcpu_svm *svm) | |||
| 372 | h = &svm->nested.hsave->control; | 371 | h = &svm->nested.hsave->control; |
| 373 | g = &svm->nested; | 372 | g = &svm->nested; |
| 374 | 373 | ||
| 375 | /* No need to intercept #UD if L1 doesn't intercept it */ | ||
| 376 | h_intercept_exceptions = | ||
| 377 | h->intercept_exceptions & ~(1U << UD_VECTOR); | ||
| 378 | |||
| 379 | c->intercept_cr = h->intercept_cr | g->intercept_cr; | 374 | c->intercept_cr = h->intercept_cr | g->intercept_cr; |
| 380 | c->intercept_dr = h->intercept_dr | g->intercept_dr; | 375 | c->intercept_dr = h->intercept_dr | g->intercept_dr; |
| 381 | c->intercept_exceptions = | 376 | c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions; |
| 382 | h_intercept_exceptions | g->intercept_exceptions; | ||
| 383 | c->intercept = h->intercept | g->intercept; | 377 | c->intercept = h->intercept | g->intercept; |
| 384 | } | 378 | } |
| 385 | 379 | ||
| @@ -2202,7 +2196,6 @@ static int ud_interception(struct vcpu_svm *svm) | |||
| 2202 | { | 2196 | { |
| 2203 | int er; | 2197 | int er; |
| 2204 | 2198 | ||
| 2205 | WARN_ON_ONCE(is_guest_mode(&svm->vcpu)); | ||
| 2206 | er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); | 2199 | er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD); |
| 2207 | if (er == EMULATE_USER_EXIT) | 2200 | if (er == EMULATE_USER_EXIT) |
| 2208 | return 0; | 2201 | return 0; |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 5c14d65f676a..3f89f6783aa5 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -899,8 +899,16 @@ static inline short vmcs_field_to_offset(unsigned long field) | |||
| 899 | { | 899 | { |
| 900 | BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); | 900 | BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX); |
| 901 | 901 | ||
| 902 | if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) || | 902 | if (field >= ARRAY_SIZE(vmcs_field_to_offset_table)) |
| 903 | vmcs_field_to_offset_table[field] == 0) | 903 | return -ENOENT; |
| 904 | |||
| 905 | /* | ||
| 906 | * FIXME: Mitigation for CVE-2017-5753. To be replaced with a | ||
| 907 | * generic mechanism. | ||
| 908 | */ | ||
| 909 | asm("lfence"); | ||
| 910 | |||
| 911 | if (vmcs_field_to_offset_table[field] == 0) | ||
| 904 | return -ENOENT; | 912 | return -ENOENT; |
| 905 | 913 | ||
| 906 | return vmcs_field_to_offset_table[field]; | 914 | return vmcs_field_to_offset_table[field]; |
| @@ -1887,7 +1895,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) | |||
| 1887 | { | 1895 | { |
| 1888 | u32 eb; | 1896 | u32 eb; |
| 1889 | 1897 | ||
| 1890 | eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) | | 1898 | eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) | |
| 1891 | (1u << DB_VECTOR) | (1u << AC_VECTOR); | 1899 | (1u << DB_VECTOR) | (1u << AC_VECTOR); |
| 1892 | if ((vcpu->guest_debug & | 1900 | if ((vcpu->guest_debug & |
| 1893 | (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == | 1901 | (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) == |
| @@ -1905,8 +1913,6 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu) | |||
| 1905 | */ | 1913 | */ |
| 1906 | if (is_guest_mode(vcpu)) | 1914 | if (is_guest_mode(vcpu)) |
| 1907 | eb |= get_vmcs12(vcpu)->exception_bitmap; | 1915 | eb |= get_vmcs12(vcpu)->exception_bitmap; |
| 1908 | else | ||
| 1909 | eb |= 1u << UD_VECTOR; | ||
| 1910 | 1916 | ||
| 1911 | vmcs_write32(EXCEPTION_BITMAP, eb); | 1917 | vmcs_write32(EXCEPTION_BITMAP, eb); |
| 1912 | } | 1918 | } |
| @@ -5917,7 +5923,6 @@ static int handle_exception(struct kvm_vcpu *vcpu) | |||
| 5917 | return 1; /* already handled by vmx_vcpu_run() */ | 5923 | return 1; /* already handled by vmx_vcpu_run() */ |
| 5918 | 5924 | ||
| 5919 | if (is_invalid_opcode(intr_info)) { | 5925 | if (is_invalid_opcode(intr_info)) { |
| 5920 | WARN_ON_ONCE(is_guest_mode(vcpu)); | ||
| 5921 | er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); | 5926 | er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD); |
| 5922 | if (er == EMULATE_USER_EXIT) | 5927 | if (er == EMULATE_USER_EXIT) |
| 5923 | return 0; | 5928 | return 0; |
