 -rw-r--r--  Documentation/virtual/kvm/api.txt   |  2
 -rw-r--r--  arch/arm64/include/asm/kvm_host.h   |  1
 -rw-r--r--  arch/arm64/include/asm/sysreg.h     | 11
 -rw-r--r--  arch/arm64/kvm/fpsimd.c             | 36
 -rw-r--r--  arch/x86/include/asm/vmx.h          |  3
 -rw-r--r--  arch/x86/kvm/vmx.c                  | 67
 -rw-r--r--  arch/x86/kvm/x86.h                  |  9
 -rw-r--r--  virt/kvm/Kconfig                    |  2
 -rw-r--r--  virt/kvm/arm/mmu.c                  |  2
 -rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c         |  5
 -rw-r--r--  virt/kvm/kvm_main.c                 | 19

11 files changed, 131 insertions, 26 deletions
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 495b7742ab58..d10944e619d3 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4610,7 +4610,7 @@ This capability indicates that kvm will implement the interfaces to handle
 reset, migration and nested KVM for branch prediction blocking. The stfle
 facility 82 should not be provided to the guest without this capability.
 
-8.14 KVM_CAP_HYPERV_TLBFLUSH
+8.18 KVM_CAP_HYPERV_TLBFLUSH
 
 Architectures: x86
 
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fda9a8ca48be..fe8777b12f86 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -306,6 +306,7 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
 #define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
 #define KVM_ARM64_HOST_SVE_IN_USE	(1 << 3) /* backup for host TIF_SVE */
+#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
 
 #define vcpu_gp_regs(v)		(&(v)->arch.ctxt.gp_regs)
 
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6171178075dc..a8f84812c6e8 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -728,6 +728,17 @@ asm(
 	asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val));	\
 } while (0)
 
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do {			\
+	u64 __scs_val = read_sysreg(sysreg);				\
+	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);		\
+	if (__scs_new != __scs_val)					\
+		write_sysreg(__scs_new, sysreg);			\
+} while (0)
+
 static inline void config_sctlr_el1(u32 clear, u32 set)
 {
 	u32 val;
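
For illustration only (not part of the patch): the new sysreg_clear_set() helper flips
individual bits without rewriting the whole register, and it skips the write_sysreg()
entirely when the value is unchanged. The fpsimd.c hunk below uses it like this:

	/* enable EL0 SVE accesses in CPACR_EL1, leaving all other bits untouched */
	sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);

	/* disable EL0 SVE accesses instead */
	sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
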
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index dc6ecfa5a2d2..aac7808ce216 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -5,13 +5,14 @@
  * Copyright 2018 Arm Limited
  * Author: Dave Martin <Dave.Martin@arm.com>
  */
-#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
 #include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_host.h>
 #include <asm/kvm_mmu.h>
+#include <asm/sysreg.h>
 
 /*
  * Called on entry to KVM_RUN unless this vcpu previously ran at least
@@ -61,10 +62,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 {
 	BUG_ON(!current->mm);
 
-	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
+	vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
+			      KVM_ARM64_HOST_SVE_IN_USE |
+			      KVM_ARM64_HOST_SVE_ENABLED);
 	vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+
 	if (test_thread_flag(TIF_SVE))
 		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
+
+	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+		vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
 }
 
 /*
@@ -92,19 +99,30 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
  */
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 {
-	local_bh_disable();
+	unsigned long flags;
 
-	update_thread_flag(TIF_SVE,
-			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+	local_irq_save(flags);
 
 	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
 		/* Clean guest FP state to memory and invalidate cpu view */
 		fpsimd_save();
 		fpsimd_flush_cpu_state();
-	} else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
-		/* Ensure user trap controls are correctly restored */
-		fpsimd_bind_task_to_cpu();
+	} else if (system_supports_sve()) {
+		/*
+		 * The FPSIMD/SVE state in the CPU has not been touched, and we
+		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
+		 * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
+		 * for EL0. To avoid spurious traps, restore the trap state
+		 * seen by kvm_arch_vcpu_load_fp():
+		 */
+		if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
+			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
+		else
+			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
 	}
 
-	local_bh_enable();
+	update_thread_flag(TIF_SVE,
+			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+
+	local_irq_restore(flags);
 }
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 425e6b8b9547..6aa8499e1f62 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -114,6 +114,7 @@
 #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK	0x0000001f
 #define VMX_MISC_SAVE_EFER_LMA			0x00000020
 #define VMX_MISC_ACTIVITY_HLT			0x00000040
+#define VMX_MISC_ZERO_LEN_INS			0x40000000
 
 /* VMFUNC functions */
 #define VMX_VMFUNC_EPTP_SWITCHING		0x00000001
@@ -351,11 +352,13 @@ enum vmcs_field {
 #define VECTORING_INFO_VALID_MASK	INTR_INFO_VALID_MASK
 
 #define INTR_TYPE_EXT_INTR		(0 << 8) /* external interrupt */
+#define INTR_TYPE_RESERVED		(1 << 8) /* reserved */
 #define INTR_TYPE_NMI_INTR		(2 << 8) /* NMI */
 #define INTR_TYPE_HARD_EXCEPTION	(3 << 8) /* processor exception */
 #define INTR_TYPE_SOFT_INTR		(4 << 8) /* software interrupt */
 #define INTR_TYPE_PRIV_SW_EXCEPTION	(5 << 8) /* ICE breakpoint - undocumented */
 #define INTR_TYPE_SOFT_EXCEPTION	(6 << 8) /* software exception */
+#define INTR_TYPE_OTHER_EVENT		(7 << 8) /* other event */
 
 /* GUEST_INTERRUPTIBILITY_INFO flags. */
 #define GUEST_INTR_STATE_STI		0x00000001
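
For illustration only (not part of the patch): the interruption type sits in bits 10:8 of the
interruption-information field, which is why the INTR_TYPE_* values are defined as (n << 8);
the vector occupies bits 7:0. Assuming the existing VALID/DELIVER_CODE masks from this header,
injecting a #GP with an error code would be encoded roughly as:

	u32 intr_info = INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK |
			INTR_TYPE_HARD_EXCEPTION | GP_VECTOR;
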
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 559a12b6184d..1689f433f3a0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1705,6 +1705,17 @@ static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
 		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
 }
 
+static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
+{
+	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
+}
+
+static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
+{
+	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
+			CPU_BASED_MONITOR_TRAP_FLAG;
+}
+
 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
 {
 	return vmcs12->cpu_based_vm_exec_control & bit;
@@ -11620,6 +11631,62 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	    !nested_cr3_valid(vcpu, vmcs12->host_cr3))
 		return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
 
+	/*
+	 * From the Intel SDM, volume 3:
+	 * Fields relevant to VM-entry event injection must be set properly.
+	 * These fields are the VM-entry interruption-information field, the
+	 * VM-entry exception error code, and the VM-entry instruction length.
+	 */
+	if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
+		u32 intr_info = vmcs12->vm_entry_intr_info_field;
+		u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
+		u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
+		bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
+		bool should_have_error_code;
+		bool urg = nested_cpu_has2(vmcs12,
+					   SECONDARY_EXEC_UNRESTRICTED_GUEST);
+		bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
+
+		/* VM-entry interruption-info field: interruption type */
+		if (intr_type == INTR_TYPE_RESERVED ||
+		    (intr_type == INTR_TYPE_OTHER_EVENT &&
+		     !nested_cpu_supports_monitor_trap_flag(vcpu)))
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry interruption-info field: vector */
+		if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
+		    (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
+		    (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry interruption-info field: deliver error code */
+		should_have_error_code =
+			intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
+			x86_exception_has_error_code(vector);
+		if (has_error_code != should_have_error_code)
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry exception error code */
+		if (has_error_code &&
+		    vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry interruption-info field: reserved bits */
+		if (intr_info & INTR_INFO_RESVD_BITS_MASK)
+			return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+		/* VM-entry instruction length */
+		switch (intr_type) {
+		case INTR_TYPE_SOFT_EXCEPTION:
+		case INTR_TYPE_SOFT_INTR:
+		case INTR_TYPE_PRIV_SW_EXCEPTION:
+			if ((vmcs12->vm_entry_instruction_len > 15) ||
+			    (vmcs12->vm_entry_instruction_len == 0 &&
+			     !nested_cpu_has_zero_length_injection(vcpu)))
+				return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+		}
+	}
+
 	return 0;
 }
 
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 331993c49dae..257f27620bc2 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -110,6 +110,15 @@ static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline bool x86_exception_has_error_code(unsigned int vector)
+{
+	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
+			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
+			BIT(PF_VECTOR) | BIT(AC_VECTOR);
+
+	return (1U << vector) & exception_has_error_code;
+}
+
 static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
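
For illustration only (not part of the patch): the helper tests the vector against a fixed
bitmap of the exceptions that architecturally push an error code, so for example:

	x86_exception_has_error_code(GP_VECTOR);	/* true:  #GP pushes an error code */
	x86_exception_has_error_code(UD_VECTOR);	/* false: #UD does not */
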
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 72143cfaf6ec..ea434ddc8499 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -47,7 +47,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT
 
 config KVM_COMPAT
        def_bool y
-       depends on KVM && COMPAT && !S390
+       depends on KVM && COMPAT && !(S390 || ARM64)
 
 config HAVE_KVM_IRQ_BYPASS
        bool
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 8d90de213ce9..1d90d79706bd 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -297,6 +297,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
 	phys_addr_t next;
 
 	assert_spin_locked(&kvm->mmu_lock);
+	WARN_ON(size & ~PAGE_MASK);
+
 	pgd = kvm->arch.pgd + stage2_pgd_index(addr);
 	do {
 		/*
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index ff7dc890941a..cdce653e3c47 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -617,11 +617,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
 		pr_warn("GICV physical address 0x%llx not page aligned\n",
 			(unsigned long long)info->vcpu.start);
 		kvm_vgic_global_state.vcpu_base = 0;
-	} else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
-		pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
-			(unsigned long long)resource_size(&info->vcpu),
-			PAGE_SIZE);
-		kvm_vgic_global_state.vcpu_base = 0;
 	} else {
 		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
 		kvm_vgic_global_state.can_emulate_gicv2 = true;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ada21f47f22b..8b47507faab5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -116,6 +116,11 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 #ifdef CONFIG_KVM_COMPAT
 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
 				  unsigned long arg);
+#define KVM_COMPAT(c)	.compat_ioctl	= (c)
+#else
+static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
+				unsigned long arg) { return -EINVAL; }
+#define KVM_COMPAT(c)	.compat_ioctl	= kvm_no_compat_ioctl
 #endif
 static int hardware_enable_all(void);
 static void hardware_disable_all(void);
@@ -2396,11 +2401,9 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
 static struct file_operations kvm_vcpu_fops = {
 	.release        = kvm_vcpu_release,
 	.unlocked_ioctl = kvm_vcpu_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-	.compat_ioctl   = kvm_vcpu_compat_ioctl,
-#endif
 	.mmap           = kvm_vcpu_mmap,
 	.llseek		= noop_llseek,
+	KVM_COMPAT(kvm_vcpu_compat_ioctl),
 };
 
 /*
@@ -2824,10 +2827,8 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
 
 static const struct file_operations kvm_device_fops = {
 	.unlocked_ioctl = kvm_device_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-	.compat_ioctl = kvm_device_ioctl,
-#endif
 	.release = kvm_device_release,
+	KVM_COMPAT(kvm_device_ioctl),
 };
 
 struct kvm_device *kvm_device_from_filp(struct file *filp)
@@ -3165,10 +3166,8 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 static struct file_operations kvm_vm_fops = {
 	.release        = kvm_vm_release,
 	.unlocked_ioctl = kvm_vm_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-	.compat_ioctl   = kvm_vm_compat_ioctl,
-#endif
 	.llseek		= noop_llseek,
+	KVM_COMPAT(kvm_vm_compat_ioctl),
 };
 
 static int kvm_dev_ioctl_create_vm(unsigned long type)
@@ -3259,8 +3258,8 @@ out:
 
 static struct file_operations kvm_chardev_ops = {
 	.unlocked_ioctl = kvm_dev_ioctl,
-	.compat_ioctl   = kvm_dev_ioctl,
 	.llseek		= noop_llseek,
+	KVM_COMPAT(kvm_dev_ioctl),
 };
 
 static struct miscdevice kvm_dev = {
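
For illustration only (not part of the patch): KVM_COMPAT() expands to a designated
initializer for .compat_ioctl, so every fops that uses it behaves consistently whether or
not CONFIG_KVM_COMPAT is set:

	KVM_COMPAT(kvm_vm_compat_ioctl)
		/* CONFIG_KVM_COMPAT=y -> .compat_ioctl = kvm_vm_compat_ioctl */
		/* CONFIG_KVM_COMPAT=n -> .compat_ioctl = kvm_no_compat_ioctl (returns -EINVAL) */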
