 arch/s390/kvm/kvm-s390.c |  2 +-
 arch/x86/kvm/lapic.c     | 11 ++++++-----
 arch/x86/kvm/mmu.c       | 20 +++++++-------------
 arch/x86/kvm/vmx.c       | 12 ++++++++++--
 arch/x86/kvm/x86.c       | 10 ++++++++--
 5 files changed, 32 insertions(+), 23 deletions(-)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index afa2bd750ffc..8cd8e7b288c5 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -110,7 +110,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 /* upper facilities limit for kvm */
 unsigned long kvm_s390_fac_list_mask[] = {
         0xffe6fffbfcfdfc40UL,
-        0x205c800000000000UL,
+        0x005c800000000000UL,
 };
 
 unsigned long kvm_s390_fac_list_mask_size(void)
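The s390 hunk clears one bit in word 1 of the facility mask offered to guests. As orientation on how such masks are indexed, here is a minimal userspace sketch, assuming the usual STFLE numbering (facility 0 is the most significant bit of word 0) and a 64-bit unsigned long, as on s390; the helper name and the main() driver are made up:

#include <stdbool.h>
#include <stdio.h>

static const unsigned long old_mask[] = {
        0xffe6fffbfcfdfc40UL, 0x205c800000000000UL,
};
static const unsigned long new_mask[] = {
        0xffe6fffbfcfdfc40UL, 0x005c800000000000UL,
};

/* Illustrative helper (name made up): facility 'nr' is bit (nr % 64),
 * counted from the MSB, of 64-bit word (nr / 64). */
static bool fac_test(const unsigned long *mask, unsigned int nr)
{
        return (mask[nr / 64] >> (63 - (nr % 64))) & 1;
}

int main(void)
{
        printf("facility 66: old=%d new=%d\n",
               fac_test(old_mask, 66), fac_test(new_mask, 66));
        return 0;
}

Under that numbering, the dropped 0x2000000000000000UL is bit 2 of word 1, i.e. facility 66.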
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index d67206a7b99a..629af0f1c5c4 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -683,8 +683,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
         unsigned long bitmap = 1;
         struct kvm_lapic **dst;
         int i;
-        bool ret = false;
-        bool x2apic_ipi = src && apic_x2apic_mode(src);
+        bool ret, x2apic_ipi;
 
         *r = -1;
 
@@ -696,16 +695,18 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
         if (irq->shorthand)
                 return false;
 
+        x2apic_ipi = src && apic_x2apic_mode(src);
         if (irq->dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST))
                 return false;
 
+        ret = true;
         rcu_read_lock();
         map = rcu_dereference(kvm->arch.apic_map);
 
-        if (!map)
+        if (!map) {
+                ret = false;
                 goto out;
-
-        ret = true;
+        }
 
         if (irq->dest_mode == APIC_DEST_PHYSICAL) {
                 if (irq->dest_id >= ARRAY_SIZE(map->phys_map))
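Two things change in lapic.c: x2apic_ipi is computed only after the shorthand early-out, and ret now defaults to true so that only the missing-map path has to clear it before jumping to out. The broadcast check is the subtle part, since which destination ID means "broadcast" depends on the sender's APIC mode. A standalone sketch of just that predicate, using the broadcast constants as they are defined in arch/x86/kvm/lapic.h (the main() driver is invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Values as defined in arch/x86/kvm/lapic.h. */
#define APIC_BROADCAST          0xFFu
#define X2APIC_BROADCAST        0xFFFFFFFFul

/* Broadcasts must take the slow path; which dest_id means "broadcast"
 * depends on whether the sender is in x2APIC mode. */
static bool is_broadcast(uint32_t dest_id, bool x2apic_ipi)
{
        return dest_id == (x2apic_ipi ? X2APIC_BROADCAST : APIC_BROADCAST);
}

int main(void)
{
        printf("%d\n", is_broadcast(0xFF, false));      /* 1: xAPIC broadcast */
        printf("%d\n", is_broadcast(0xFF, true));       /* 0: plain ID in x2APIC */
        printf("%d\n", is_broadcast(0xFFFFFFFF, true)); /* 1: x2APIC broadcast */
        return 0;
}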
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 146f295ee322..d43867c33bc4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4481,9 +4481,11 @@ static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
         pfn = spte_to_pfn(*sptep);
 
         /*
-         * Only EPT supported for now; otherwise, one would need to
-         * find out efficiently whether the guest page tables are
-         * also using huge pages.
+         * We cannot do huge page mapping for indirect shadow pages,
+         * which are found on the last rmap (level = 1) when not using
+         * tdp; such shadow pages are synced with the page table in
+         * the guest, and the guest page table is using 4K page size
+         * mapping if the indirect sp has level = 1.
          */
         if (sp->role.direct &&
             !kvm_is_reserved_pfn(pfn) &&
@@ -4504,19 +4506,12 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
         bool flush = false;
         unsigned long *rmapp;
         unsigned long last_index, index;
-        gfn_t gfn_start, gfn_end;
 
         spin_lock(&kvm->mmu_lock);
 
-        gfn_start = memslot->base_gfn;
-        gfn_end = memslot->base_gfn + memslot->npages - 1;
-
-        if (gfn_start >= gfn_end)
-                goto out;
-
         rmapp = memslot->arch.rmap[0];
-        last_index = gfn_to_index(gfn_end, memslot->base_gfn,
-                                  PT_PAGE_TABLE_LEVEL);
+        last_index = gfn_to_index(memslot->base_gfn + memslot->npages - 1,
+                                  memslot->base_gfn, PT_PAGE_TABLE_LEVEL);
 
         for (index = 0; index <= last_index; ++index, ++rmapp) {
                 if (*rmapp)
@@ -4534,7 +4529,6 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
         if (flush)
                 kvm_flush_remote_tlbs(kvm);
 
-out:
         spin_unlock(&kvm->mmu_lock);
 }
 
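Beyond the reworded comment, the mmu.c change deletes the gfn_start/gfn_end guard, which could only trigger for a slot with npages == 1 (then gfn_end == gfn_start) and so wrongly skipped single-page memslots, and feeds the last gfn straight into gfn_to_index(). A userspace sketch of the index arithmetic, with the helper transcribed from the x86 KVM headers (the slot values in main() are made up):

#include <stdio.h>

typedef unsigned long long gfn_t;

/* As in the x86 KVM headers: each level adds 9 bits of page table. */
#define KVM_HPAGE_GFN_SHIFT(level)      (((level) - 1) * 9)
#define PT_PAGE_TABLE_LEVEL             1

static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

int main(void)
{
        gfn_t base_gfn = 0x100, npages = 1;     /* made-up one-page slot */

        printf("last_index = %llu\n",
               gfn_to_index(base_gfn + npages - 1, base_gfn,
                            PT_PAGE_TABLE_LEVEL));
        return 0;
}

At PT_PAGE_TABLE_LEVEL the shift is zero, so last_index degenerates to npages - 1, and a one-page slot still gets its single rmap scanned.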
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f5e8dce8046c..f7b61687bd79 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3622,8 +3622,16 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-        unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
-                    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
+        /*
+         * Pass through host's Machine Check Enable value to hw_cr4, which
+         * is in force while we are in guest mode.  Do not let guests control
+         * this bit, even if host CR4.MCE == 0.
+         */
+        unsigned long hw_cr4 =
+                (cr4_read_shadow() & X86_CR4_MCE) |
+                (cr4 & ~X86_CR4_MCE) |
+                (to_vmx(vcpu)->rmode.vm86_active ?
+                 KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
 
         if (cr4 & X86_CR4_VMXE) {
                 /*
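The replacement hw_cr4 expression is a two-source merge: the MCE bit is taken from the host's CR4 shadow, everything else from the guest-visible value. A runnable sketch of that bit selection (the host and guest CR4 values are invented; X86_CR4_MCE is genuinely bit 6):

#include <stdio.h>

#define X86_CR4_MCE     0x00000040UL    /* Machine Check Enable, bit 6 */

int main(void)
{
        unsigned long host_cr4  = 0x001406e0UL; /* stand-in for cr4_read_shadow(); MCE set */
        unsigned long guest_cr4 = 0x00000020UL; /* guest left MCE clear */

        /* MCE always comes from the host; every other bit from the guest. */
        unsigned long hw_cr4 = (host_cr4 & X86_CR4_MCE) |
                               (guest_cr4 & ~X86_CR4_MCE);

        printf("hw_cr4 = %#lx\n", hw_cr4);      /* 0x60: MCE forced on */
        return 0;
}

The reason the bit is forced is architectural: a machine check arriving while hardware CR4.MCE is 0 shuts the processor down instead of raising #MC.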
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e1a81267f3f6..ed31c31b2485 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5799,7 +5799,6 @@ int kvm_arch_init(void *opaque)
         kvm_set_mmio_spte_mask();
 
         kvm_x86_ops = ops;
-        kvm_init_msr_list();
 
         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
                         PT_DIRTY_MASK, PT64_NX_MASK, 0);
@@ -7253,7 +7252,14 @@ void kvm_arch_hardware_disable(void)
 
 int kvm_arch_hardware_setup(void)
 {
-        return kvm_x86_ops->hardware_setup();
+        int r;
+
+        r = kvm_x86_ops->hardware_setup();
+        if (r != 0)
+                return r;
+
+        kvm_init_msr_list();
+        return 0;
 }
 
 void kvm_arch_hardware_unsetup(void)
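The two x86.c hunks are one logical move: kvm_init_msr_list() leaves kvm_arch_init() and now runs only after kvm_x86_ops->hardware_setup() has succeeded, presumably because the list is built by probing which MSRs the host really supports and some of those probes depend on state that hardware setup establishes. A toy sketch of that probe-and-filter pattern (every name and MSR number below is a stand-in, not the kernel's code):

#include <stdio.h>

/* Pretend rdmsr_safe(): fails for MSRs this host doesn't have. */
static int rdmsr_safe_stub(unsigned int msr)
{
        return msr == 0x0d90 ? -1 : 0;  /* e.g. no MSR_IA32_BNDCFGS here */
}

static unsigned int msrs_to_probe[] = { 0x0010, 0x0174, 0x0d90 };
static unsigned int msrs_to_save[3];
static unsigned int num_msrs;

/* Keep only the MSRs that actually exist on this host, which is
 * only knowable once hardware setup has run. */
static void init_msr_list(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(msrs_to_probe) / sizeof(msrs_to_probe[0]); i++)
                if (rdmsr_safe_stub(msrs_to_probe[i]) == 0)
                        msrs_to_save[num_msrs++] = msrs_to_probe[i];
}

int main(void)
{
        init_msr_list();
        printf("kept %u of 3 MSRs\n", num_msrs);        /* kept 2 of 3 */
        return 0;
}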
