author     Linus Torvalds <torvalds@linux-foundation.org>  2017-06-01 13:48:09 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-06-01 13:48:09 -0400
commit     9ea15a59c39d6395160274d8829bb8b7e185dd56 (patch)
tree       ca467f1c07208b36710f60d3f5fe868b4667c3b2
parent     0bb230399fd337cc9a838d47a0c9ec3433aa612e (diff)
parent     47a66eed99e6f231f4a1d261a9d493f4eee94829 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
"Many small x86 bug fixes: SVM segment registers access rights, nested
VMX, preempt notifiers, LAPIC virtual wire mode, NMI injection"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: x86: Fix nmi injection failure when vcpu got blocked
KVM: SVM: do not zero out segment attributes if segment is unusable or not present
KVM: SVM: ignore type when setting segment registers
KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging
KVM: x86: Fix virtual wire mode
KVM: nVMX: Fix handling of lmsw instruction
KVM: X86: Fix preempt the preemption timer cancel
-rw-r--r--  arch/x86/kvm/lapic.c |   5
-rw-r--r--  arch/x86/kvm/svm.c   |  26
-rw-r--r--  arch/x86/kvm/vmx.c   | 147
-rw-r--r--  arch/x86/kvm/x86.c   |   7
4 files changed, 83 insertions, 102 deletions
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index c329d2894905..d24c8742d9b0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1495,8 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
 
 static void cancel_hv_timer(struct kvm_lapic *apic)
 {
+	preempt_disable();
 	kvm_x86_ops->cancel_hv_timer(apic->vcpu);
 	apic->lapic_timer.hv_timer_in_use = false;
+	preempt_enable();
 }
 
 static bool start_hv_timer(struct kvm_lapic *apic)
@@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
 		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
 	apic_update_lvtt(apic);
-	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
+	if (kvm_vcpu_is_reset_bsp(vcpu) &&
+	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
 		kvm_lapic_set_reg(apic, APIC_LVT0,
 			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
 	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
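
The preempt_disable()/preempt_enable() pair added to cancel_hv_timer() makes the two stores (cancel the hardware timer, clear hv_timer_in_use) atomic with respect to the preempt notifiers, which can otherwise run between them and act on a stale flag. Below is a minimal userspace model of the pattern; a mutex stands in for disabling preemption, a second thread plays the notifier, and all names are illustrative rather than kernel API.

/*
 * Model of the cancel_hv_timer() race fix: two state updates must
 * appear atomic to code that can run between them.  Build with -pthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool hw_timer_running = true;
static bool hv_timer_in_use = true;

static void cancel_hv_timer_fixed(void)
{
	pthread_mutex_lock(&lock);      /* kernel: preempt_disable() */
	hw_timer_running = false;       /* step 1: cancel hardware timer */
	hv_timer_in_use = false;        /* step 2: record that it is gone */
	pthread_mutex_unlock(&lock);    /* kernel: preempt_enable() */
}

/* Plays the preempt notifier: must never see step 1 without step 2. */
static void *observer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	if (hv_timer_in_use && !hw_timer_running)
		puts("BUG: flag says in use, but timer already cancelled");
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, observer, NULL);
	cancel_hv_timer_fixed();
	pthread_join(t, NULL);
	puts("state stayed consistent");
	return 0;
}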
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 183ddb235fb4..ba9891ac5c56 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1807,7 +1807,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 	 * AMD's VMCB does not have an explicit unusable field, so emulate it
 	 * for cross vendor migration purposes by "not present"
 	 */
-	var->unusable = !var->present || (var->type == 0);
+	var->unusable = !var->present;
 
 	switch (seg) {
 	case VCPU_SREG_TR:
@@ -1840,6 +1840,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 		 */
 		if (var->unusable)
 			var->db = 0;
+		/* This is symmetric with svm_set_segment() */
 		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
 		break;
 	}
@@ -1980,18 +1981,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 	s->base = var->base;
 	s->limit = var->limit;
 	s->selector = var->selector;
-	if (var->unusable)
-		s->attrib = 0;
-	else {
-		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
-		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
-		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
-		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
-		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
-		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
-		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
-		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
-	}
+	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
 
 	/*
 	 * This is always accurate, except if SYSRET returned to a segment
@@ -2000,7 +1997,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 	 * would entail passing the CPL to userspace and back.
 	 */
 	if (seg == VCPU_SREG_SS)
-		svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+		/* This is symmetric with svm_get_segment() */
+		svm->vmcb->save.cpl = (var->dpl & 3);
 
 	mark_dirty(svm->vmcb, VMCB_SEG);
 }
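
The rewritten svm_set_segment() no longer zeroes the whole attribute word for unusable or not-present segments; it folds "unusable" into the present (P) bit alone, so type, DPL, L, G and the rest survive a save/restore round trip (the cross-vendor migration case the comment in svm_get_segment() mentions). A standalone sketch of the packing follows; the shift constants mirror those in arch/x86/include/asm/svm.h, and the struct is a trimmed stand-in for struct kvm_segment.

/* Sketch of the VMCB segment-attribute packing done in svm_set_segment(). */
#include <stdint.h>
#include <stdio.h>

#define SVM_SELECTOR_TYPE_MASK	0xf
#define SVM_SELECTOR_S_SHIFT	4
#define SVM_SELECTOR_DPL_SHIFT	5
#define SVM_SELECTOR_P_SHIFT	7
#define SVM_SELECTOR_AVL_SHIFT	8
#define SVM_SELECTOR_L_SHIFT	9
#define SVM_SELECTOR_DB_SHIFT	10
#define SVM_SELECTOR_G_SHIFT	11

struct seg {	/* trimmed stand-in for struct kvm_segment */
	uint8_t type, s, dpl, present, avl, l, db, g, unusable;
};

static uint16_t pack_attrib(const struct seg *var)
{
	uint16_t attrib;

	attrib  = var->type & SVM_SELECTOR_TYPE_MASK;
	attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
	attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
	/* the fix: unusable clears only P instead of the whole word */
	attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
	attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
	attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
	attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
	attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	return attrib;
}

int main(void)
{
	struct seg cs = { .type = 0xb, .s = 1, .dpl = 0, .present = 1,
			  .l = 1, .g = 1, .unusable = 0 };

	printf("usable CS attrib   = 0x%03x\n", (unsigned)pack_attrib(&cs));
	cs.unusable = 1;	/* only P drops; type/L/G are preserved */
	printf("unusable CS attrib = 0x%03x\n", (unsigned)pack_attrib(&cs));
	return 0;
}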
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 72f78396bc09..9b4b5d6dcd34 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/*
- * This function performs the various checks including
- * - if it's 4KB aligned
- * - No bits beyond the physical address width are set
- * - Returns 0 on success or else 1
- * (Intel SDM Section 30.3)
- */
-static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
-				  gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
 	gva_t gva;
-	gpa_t vmptr;
 	struct x86_exception e;
-	struct page *page;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
 			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
 		return 1;
 
-	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-				sizeof(vmptr), &e)) {
+	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+				sizeof(*vmpointer), &e)) {
 		kvm_inject_page_fault(vcpu, &e);
 		return 1;
 	}
 
-	switch (exit_reason) {
-	case EXIT_REASON_VMON:
-		/*
-		 * SDM 3: 24.11.5
-		 * The first 4 bytes of VMXON region contain the supported
-		 * VMCS revision identifier
-		 *
-		 * Note - IA32_VMX_BASIC[48] will never be 1
-		 * for the nested case;
-		 * which replaces physical address width with 32
-		 *
-		 */
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		page = nested_get_page(vcpu, vmptr);
-		if (page == NULL) {
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-			kunmap(page);
-			nested_release_page_clean(page);
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		kunmap(page);
-		nested_release_page_clean(page);
-		vmx->nested.vmxon_ptr = vmptr;
-		break;
-	case EXIT_REASON_VMCLEAR:
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMCLEAR_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		if (vmptr == vmx->nested.vmxon_ptr) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMCLEAR_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		break;
-	case EXIT_REASON_VMPTRLD:
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMPTRLD_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		if (vmptr == vmx->nested.vmxon_ptr) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMPTRLD_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		break;
-	default:
-		return 1; /* shouldn't happen */
-	}
-
-	if (vmpointer)
-		*vmpointer = vmptr;
 	return 0;
 }
 
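nested_vmx_get_vmptr() now does nothing but fetch the instruction operand; the per-instruction validation moves into the callers shown in the following hunks. The check each caller repeats rejects a pointer that is not 4 KiB aligned or that sets bits at or above the guest's physical-address width. A standalone sketch of that predicate, with assumed stand-ins for PAGE_ALIGNED() and cpuid_maxphyaddr():

/* Sketch of the vmptr sanity check repeated by handle_vmon/vmclear/vmptrld. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;

static bool vmptr_is_valid(gpa_t vmptr, int maxphyaddr)
{
	if (vmptr & 0xfffULL)		/* kernel: !PAGE_ALIGNED(vmptr) */
		return false;
	if (vmptr >> maxphyaddr)	/* bits beyond physical width set */
		return false;
	return true;
}

int main(void)
{
	int maxphyaddr = 40;	/* example guest MAXPHYADDR */

	printf("%d\n", vmptr_is_valid(0x12345000ULL, maxphyaddr)); /* 1 */
	printf("%d\n", vmptr_is_valid(0x12345008ULL, maxphyaddr)); /* 0: unaligned */
	printf("%d\n", vmptr_is_valid(1ULL << 45, maxphyaddr));    /* 0: too wide */
	return 0;
}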
@@ -7066,6 +6990,8 @@ out_msr_bitmap:
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
 	int ret;
+	gpa_t vmptr;
+	struct page *page;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
 		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	/*
+	 * SDM 3: 24.11.5
+	 * The first 4 bytes of VMXON region contain the supported
+	 * VMCS revision identifier
+	 *
+	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
+	 * which replaces physical address width with 32
+	 */
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	page = nested_get_page(vcpu, vmptr);
+	if (page == NULL) {
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+	if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+		kunmap(page);
+		nested_release_page_clean(page);
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+	kunmap(page);
+	nested_release_page_clean(page);
+
+	vmx->nested.vmxon_ptr = vmptr;
 	ret = enter_vmx_operation(vcpu);
 	if (ret)
 		return ret;
@@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	if (vmptr == vmx->nested.vmxon_ptr) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
 	if (vmptr == vmx->nested.current_vmptr)
 		nested_release_vmcs12(vmx);
 
@@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	if (vmptr == vmx->nested.vmxon_ptr) {
+		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
 	if (vmx->nested.current_vmptr != vmptr) {
 		struct vmcs12 *new_vmcs12;
 		struct page *page;
@@ -7913,11 +7887,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 {
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	int cr = exit_qualification & 15;
-	int reg = (exit_qualification >> 8) & 15;
-	unsigned long val = kvm_register_readl(vcpu, reg);
+	int reg;
+	unsigned long val;
 
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
+		reg = (exit_qualification >> 8) & 15;
+		val = kvm_register_readl(vcpu, reg);
 		switch (cr) {
 		case 0:
 			if (vmcs12->cr0_guest_host_mask &
@@ -7972,6 +7948,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
 		 * cr0. Other attempted changes are ignored, with no exit.
 		 */
+		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
 		if (vmcs12->cr0_guest_host_mask & 0xe &
 				(val ^ vmcs12->cr0_read_shadow))
 			return true;
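
Both nested_vmx_exit_handled_cr() hunks fix decoding of the CR-access exit qualification: the GPR field is only defined for MOV-to/from-CR accesses, and the LMSW source operand lives at bit 16 and up (LMSW_SOURCE_DATA_SHIFT). A minimal sketch of the layout, following the Intel SDM's description of the exit-qualification fields for control-register accesses:

/* Sketch of CR-access exit-qualification decoding (Intel SDM vol. 3). */
#include <stdio.h>

#define LMSW_SOURCE_DATA_SHIFT 16	/* as in arch/x86/kvm/vmx.h */

int main(void)
{
	/* example: lmsw on cr0 with source data 0x3 */
	unsigned long exit_qualification =
		(0x3UL << LMSW_SOURCE_DATA_SHIFT) | (3 << 4) | 0;

	int cr = exit_qualification & 15;		/* bits 3:0  */
	int access_type = (exit_qualification >> 4) & 3;/* bits 5:4  */

	switch (access_type) {
	case 0: {	/* mov to cr: GPR field (bits 11:8) is valid */
		int reg = (exit_qualification >> 8) & 15;
		printf("mov to cr%d from gpr %d\n", cr, reg);
		break;
	}
	case 3: {	/* lmsw: operand in bits 16+, only low 4 bits used */
		unsigned long val =
			(exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
		printf("lmsw cr%d <- 0x%lx\n", cr, val);
		break;
	}
	default:
		printf("other access type %d\n", access_type);
	}
	return 0;
}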
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 02363e37d4a6..a2cd0997343c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8394,10 +8394,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.pv.pv_unhalted)
 		return true;
 
-	if (atomic_read(&vcpu->arch.nmi_queued))
+	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+	    (vcpu->arch.nmi_pending &&
+	     kvm_x86_ops->nmi_allowed(vcpu)))
 		return true;
 
-	if (kvm_test_request(KVM_REQ_SMI, vcpu))
+	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+	    (vcpu->arch.smi_pending && !is_smm(vcpu)))
 		return true;
 
 	if (kvm_arch_interrupt_allowed(vcpu) &&
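
The x86.c change fixes the "nmi injection failure when vcpu got blocked" case from the merge log: a halted vCPU must wake not only on a freshly queued NMI/SMI request but also when one is already latched and currently deliverable. A standalone model of the fixed predicate, with the struct and field names as illustrative stand-ins rather than kernel API:

/* Model of the wake-up predicate fixed in kvm_vcpu_has_events(). */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_model {
	bool nmi_queued;	/* kernel: kvm_test_request(KVM_REQ_NMI) */
	bool nmi_pending;	/* NMI latched but not yet injected */
	bool nmi_allowed;	/* kernel: kvm_x86_ops->nmi_allowed() */
	bool smi_queued;	/* kernel: kvm_test_request(KVM_REQ_SMI) */
	bool smi_pending;	/* SMI latched but not yet injected */
	bool in_smm;		/* kernel: is_smm() */
};

static bool vcpu_has_events(const struct vcpu_model *v)
{
	if (v->nmi_queued || (v->nmi_pending && v->nmi_allowed))
		return true;
	if (v->smi_queued || (v->smi_pending && !v->in_smm))
		return true;
	return false;
}

int main(void)
{
	/* the case the old code missed: NMI pending, nothing newly queued */
	struct vcpu_model v = { .nmi_pending = true, .nmi_allowed = true };

	printf("wake blocked vcpu: %s\n", vcpu_has_events(&v) ? "yes" : "no");
	return 0;
}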