diff options
author | Paolo Bonzini <pbonzini@redhat.com> | 2019-05-15 17:41:43 -0400 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2019-05-15 17:41:43 -0400 |
commit | dd53f6102c30a774e0db8e55d49017a38060f6f6 (patch) | |
tree | 82ac5f5dcd56225c70516d82a1612439e8d73669 | |
parent | 59c5c58c5b93285753d5c1de34d2e00039c27bc0 (diff) | |
parent | 9eecfc22e0bfc7a4c8ca007f083f0ae492d6e891 (diff) |
Merge tag 'kvmarm-for-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm updates for 5.2
- guest SVE support
- guest Pointer Authentication support
- Better discrimination of perf counters between host and guests
Conflicts:
include/uapi/linux/kvm.h
31 files changed, 1914 insertions, 181 deletions
diff --git a/Documentation/arm64/perf.txt b/Documentation/arm64/perf.txt new file mode 100644 index 000000000000..0d6a7d87d49e --- /dev/null +++ b/Documentation/arm64/perf.txt | |||
@@ -0,0 +1,85 @@ | |||
1 | Perf Event Attributes | ||
2 | ===================== | ||
3 | |||
4 | Author: Andrew Murray <andrew.murray@arm.com> | ||
5 | Date: 2019-03-06 | ||
6 | |||
7 | exclude_user | ||
8 | ------------ | ||
9 | |||
10 | This attribute excludes userspace. | ||
11 | |||
12 | Userspace always runs at EL0 and thus this attribute will exclude EL0. | ||
13 | |||
14 | |||
15 | exclude_kernel | ||
16 | -------------- | ||
17 | |||
18 | This attribute excludes the kernel. | ||
19 | |||
20 | The kernel runs at EL2 with VHE and EL1 without. Guest kernels always run | ||
21 | at EL1. | ||
22 | |||
23 | For the host this attribute will exclude EL1 and additionally EL2 on a VHE | ||
24 | system. | ||
25 | |||
26 | For the guest this attribute will exclude EL1. Please note that EL2 is | ||
27 | never counted within a guest. | ||
28 | |||
29 | |||
30 | exclude_hv | ||
31 | ---------- | ||
32 | |||
33 | This attribute excludes the hypervisor. | ||
34 | |||
35 | For a VHE host this attribute is ignored as we consider the host kernel to | ||
36 | be the hypervisor. | ||
37 | |||
38 | For a non-VHE host this attribute will exclude EL2 as we consider the | ||
39 | hypervisor to be any code that runs at EL2 which is predominantly used for | ||
40 | guest/host transitions. | ||
41 | |||
42 | For the guest this attribute has no effect. Please note that EL2 is | ||
43 | never counted within a guest. | ||
44 | |||
45 | |||
46 | exclude_host / exclude_guest | ||
47 | ---------------------------- | ||
48 | |||
49 | These attributes exclude the KVM host and guest, respectively. | ||
50 | |||
51 | The KVM host may run at EL0 (userspace), EL1 (non-VHE kernel) and EL2 (VHE | ||
52 | kernel or non-VHE hypervisor). | ||
53 | |||
54 | The KVM guest may run at EL0 (userspace) and EL1 (kernel). | ||
55 | |||
56 | Due to the overlapping exception levels between host and guests we cannot | ||
57 | exclusively rely on the PMU's hardware exception filtering - therefore we | ||
58 | must enable/disable counting on the entry and exit to the guest. This is | ||
59 | performed differently on VHE and non-VHE systems. | ||
60 | |||
61 | For non-VHE systems we exclude EL2 for exclude_host - upon entering and | ||
62 | exiting the guest we disable/enable the event as appropriate based on the | ||
63 | exclude_host and exclude_guest attributes. | ||
64 | |||
65 | For VHE systems we exclude EL1 for exclude_guest and exclude both EL0,EL2 | ||
66 | for exclude_host. Upon entering and exiting the guest we modify the event | ||
67 | to include/exclude EL0 as appropriate based on the exclude_host and | ||
68 | exclude_guest attributes. | ||
69 | |||
70 | The statements above also apply when these attributes are used within a | ||
71 | non-VHE guest however please note that EL2 is never counted within a guest. | ||
72 | |||
73 | |||
74 | Accuracy | ||
75 | -------- | ||
76 | |||
77 | On non-VHE hosts we enable/disable counters on the entry/exit of host/guest | ||
78 | transition at EL2 - however there is a period of time between | ||
79 | enabling/disabling the counters and entering/exiting the guest. We are | ||
80 | able to eliminate counters counting host events on the boundaries of guest | ||
81 | entry/exit when counting guest events by filtering out EL2 for | ||
82 | exclude_host. However when using !exclude_hv there is a small blackout | ||
83 | window at the guest entry/exit where host events are not captured. | ||
84 | |||
85 | On VHE systems there are no blackout windows. | ||
diff --git a/Documentation/arm64/pointer-authentication.txt b/Documentation/arm64/pointer-authentication.txt index 5baca42ba146..fc71b33de87e 100644 --- a/Documentation/arm64/pointer-authentication.txt +++ b/Documentation/arm64/pointer-authentication.txt | |||
@@ -87,7 +87,21 @@ used to get and set the keys for a thread. | |||
87 | Virtualization | 87 | Virtualization |
88 | -------------- | 88 | -------------- |
89 | 89 | ||
90 | Pointer authentication is not currently supported in KVM guests. KVM | 90 | Pointer authentication is enabled in KVM guest when each virtual cpu is |
91 | will mask the feature bits from ID_AA64ISAR1_EL1, and attempted use of | 91 | initialised by passing flags KVM_ARM_VCPU_PTRAUTH_[ADDRESS/GENERIC] and |
92 | the feature will result in an UNDEFINED exception being injected into | 92 | requesting these two separate cpu features to be enabled. The current KVM |
93 | the guest. | 93 | guest implementation works by enabling both features together, so both |
94 | these userspace flags are checked before enabling pointer authentication. | ||
95 | The separate userspace flag will allow having no userspace ABI changes | ||
96 | if support is added in the future to allow these two features to be | ||
97 | enabled independently of one another. | ||
98 | |||
99 | As the Arm architecture specifies that the Pointer Authentication | ||
100 | feature is implemented along with the VHE feature, the KVM arm64 ptrauth | ||
101 | code relies on VHE mode being present. | ||
102 | |||
103 | Additionally, when these vcpu feature flags are not set then KVM will | ||
104 | filter out the Pointer Authentication system key registers from | ||
105 | KVM_GET/SET_REG_* ioctls and mask those features from cpufeature ID | ||
106 | register. Any attempt to use the Pointer Authentication instructions will | ||
107 | result in an UNDEFINED exception being injected into the guest. | ||
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 8ffd9beb931b..73a501eb9291 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt | |||
@@ -1883,6 +1883,12 @@ Architectures: all | |||
1883 | Type: vcpu ioctl | 1883 | Type: vcpu ioctl |
1884 | Parameters: struct kvm_one_reg (in) | 1884 | Parameters: struct kvm_one_reg (in) |
1885 | Returns: 0 on success, negative value on failure | 1885 | Returns: 0 on success, negative value on failure |
1886 | Errors: | ||
1887 | ENOENT: no such register | ||
1888 | EINVAL: invalid register ID, or no such register | ||
1889 | EPERM: (arm64) register access not allowed before vcpu finalization | ||
1890 | (These error codes are indicative only: do not rely on a specific error | ||
1891 | code being returned in a specific situation.) | ||
1886 | 1892 | ||
1887 | struct kvm_one_reg { | 1893 | struct kvm_one_reg { |
1888 | __u64 id; | 1894 | __u64 id; |
@@ -2120,6 +2126,37 @@ contains elements ranging from 32 to 128 bits. The index is a 32bit | |||
2120 | value in the kvm_regs structure seen as a 32bit array. | 2126 | value in the kvm_regs structure seen as a 32bit array. |
2121 | 0x60x0 0000 0010 <index into the kvm_regs struct:16> | 2127 | 0x60x0 0000 0010 <index into the kvm_regs struct:16> |
2122 | 2128 | ||
2129 | Specifically: | ||
2130 | Encoding Register Bits kvm_regs member | ||
2131 | ---------------------------------------------------------------- | ||
2132 | 0x6030 0000 0010 0000 X0 64 regs.regs[0] | ||
2133 | 0x6030 0000 0010 0002 X1 64 regs.regs[1] | ||
2134 | ... | ||
2135 | 0x6030 0000 0010 003c X30 64 regs.regs[30] | ||
2136 | 0x6030 0000 0010 003e SP 64 regs.sp | ||
2137 | 0x6030 0000 0010 0040 PC 64 regs.pc | ||
2138 | 0x6030 0000 0010 0042 PSTATE 64 regs.pstate | ||
2139 | 0x6030 0000 0010 0044 SP_EL1 64 sp_el1 | ||
2140 | 0x6030 0000 0010 0046 ELR_EL1 64 elr_el1 | ||
2141 | 0x6030 0000 0010 0048 SPSR_EL1 64 spsr[KVM_SPSR_EL1] (alias SPSR_SVC) | ||
2142 | 0x6030 0000 0010 004a SPSR_ABT 64 spsr[KVM_SPSR_ABT] | ||
2143 | 0x6030 0000 0010 004c SPSR_UND 64 spsr[KVM_SPSR_UND] | ||
2144 | 0x6030 0000 0010 004e SPSR_IRQ 64 spsr[KVM_SPSR_IRQ] | ||
2145 | 0x6030 0000 0010 0050 SPSR_FIQ 64 spsr[KVM_SPSR_FIQ] | ||
2146 | 0x6040 0000 0010 0054 V0 128 fp_regs.vregs[0] (*) | ||
2147 | 0x6040 0000 0010 0058 V1 128 fp_regs.vregs[1] (*) | ||
2148 | ... | ||
2149 | 0x6040 0000 0010 00d0 V31 128 fp_regs.vregs[31] (*) | ||
2150 | 0x6020 0000 0010 00d4 FPSR 32 fp_regs.fpsr | ||
2151 | 0x6020 0000 0010 00d5 FPCR 32 fp_regs.fpcr | ||
2152 | |||
2153 | (*) These encodings are not accepted for SVE-enabled vcpus. See | ||
2154 | KVM_ARM_VCPU_INIT. | ||
2155 | |||
2156 | The equivalent register content can be accessed via bits [127:0] of | ||
2157 | the corresponding SVE Zn registers instead for vcpus that have SVE | ||
2158 | enabled (see below). | ||
2159 | |||
2123 | arm64 CCSIDR registers are demultiplexed by CSSELR value: | 2160 | arm64 CCSIDR registers are demultiplexed by CSSELR value: |
2124 | 0x6020 0000 0011 00 <csselr:8> | 2161 | 0x6020 0000 0011 00 <csselr:8> |
2125 | 2162 | ||
@@ -2129,6 +2166,64 @@ arm64 system registers have the following id bit patterns: | |||
2129 | arm64 firmware pseudo-registers have the following bit pattern: | 2166 | arm64 firmware pseudo-registers have the following bit pattern: |
2130 | 0x6030 0000 0014 <regno:16> | 2167 | 0x6030 0000 0014 <regno:16> |
2131 | 2168 | ||
2169 | arm64 SVE registers have the following bit patterns: | ||
2170 | 0x6080 0000 0015 00 <n:5> <slice:5> Zn bits[2048*slice + 2047 : 2048*slice] | ||
2171 | 0x6050 0000 0015 04 <n:4> <slice:5> Pn bits[256*slice + 255 : 256*slice] | ||
2172 | 0x6050 0000 0015 060 <slice:5> FFR bits[256*slice + 255 : 256*slice] | ||
2173 | 0x6060 0000 0015 ffff KVM_REG_ARM64_SVE_VLS pseudo-register | ||
2174 | |||
2175 | Access to register IDs where 2048 * slice >= 128 * max_vq will fail with | ||
2176 | ENOENT. max_vq is the vcpu's maximum supported vector length in 128-bit | ||
2177 | quadwords: see (**) below. | ||
2178 | |||
2179 | These registers are only accessible on vcpus for which SVE is enabled. | ||
2180 | See KVM_ARM_VCPU_INIT for details. | ||
2181 | |||
2182 | In addition, except for KVM_REG_ARM64_SVE_VLS, these registers are not | ||
2183 | accessible until the vcpu's SVE configuration has been finalized | ||
2184 | using KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE). See KVM_ARM_VCPU_INIT | ||
2185 | and KVM_ARM_VCPU_FINALIZE for more information about this procedure. | ||
2186 | |||
2187 | KVM_REG_ARM64_SVE_VLS is a pseudo-register that allows the set of vector | ||
2188 | lengths supported by the vcpu to be discovered and configured by | ||
2189 | userspace. When transferred to or from user memory via KVM_GET_ONE_REG | ||
2190 | or KVM_SET_ONE_REG, the value of this register is of type | ||
2191 | __u64[KVM_ARM64_SVE_VLS_WORDS], and encodes the set of vector lengths as | ||
2192 | follows: | ||
2193 | |||
2194 | __u64 vector_lengths[KVM_ARM64_SVE_VLS_WORDS]; | ||
2195 | |||
2196 | if (vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX && | ||
2197 | ((vector_lengths[(vq - KVM_ARM64_SVE_VQ_MIN) / 64] >> | ||
2198 | ((vq - KVM_ARM64_SVE_VQ_MIN) % 64)) & 1)) | ||
2199 | /* Vector length vq * 16 bytes supported */ | ||
2200 | else | ||
2201 | /* Vector length vq * 16 bytes not supported */ | ||
2202 | |||
2203 | (**) The maximum value vq for which the above condition is true is | ||
2204 | max_vq. This is the maximum vector length available to the guest on | ||
2205 | this vcpu, and determines which register slices are visible through | ||
2206 | this ioctl interface. | ||
2207 | |||
2208 | (See Documentation/arm64/sve.txt for an explanation of the "vq" | ||
2209 | nomenclature.) | ||
2210 | |||
2211 | KVM_REG_ARM64_SVE_VLS is only accessible after KVM_ARM_VCPU_INIT. | ||
2212 | KVM_ARM_VCPU_INIT initialises it to the best set of vector lengths that | ||
2213 | the host supports. | ||
2214 | |||
2215 | Userspace may subsequently modify it if desired until the vcpu's SVE | ||
2216 | configuration is finalized using KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE). | ||
2217 | |||
2218 | Apart from simply removing all vector lengths from the host set that | ||
2219 | exceed some value, support for arbitrarily chosen sets of vector lengths | ||
2220 | is hardware-dependent and may not be available. Attempting to configure | ||
2221 | an invalid set of vector lengths via KVM_SET_ONE_REG will fail with | ||
2222 | EINVAL. | ||
2223 | |||
2224 | After the vcpu's SVE configuration is finalized, further attempts to | ||
2225 | write this register will fail with EPERM. | ||
2226 | |||
2132 | 2227 | ||
2133 | MIPS registers are mapped using the lower 32 bits. The upper 16 of that is | 2228 | MIPS registers are mapped using the lower 32 bits. The upper 16 of that is |
2134 | the register group type: | 2229 | the register group type: |
@@ -2181,6 +2276,12 @@ Architectures: all | |||
2181 | Type: vcpu ioctl | 2276 | Type: vcpu ioctl |
2182 | Parameters: struct kvm_one_reg (in and out) | 2277 | Parameters: struct kvm_one_reg (in and out) |
2183 | Returns: 0 on success, negative value on failure | 2278 | Returns: 0 on success, negative value on failure |
2279 | Errors include: | ||
2280 | ENOENT: no such register | ||
2281 | EINVAL: invalid register ID, or no such register | ||
2282 | EPERM: (arm64) register access not allowed before vcpu finalization | ||
2283 | (These error codes are indicative only: do not rely on a specific error | ||
2284 | code being returned in a specific situation.) | ||
2184 | 2285 | ||
2185 | This ioctl allows to receive the value of a single register implemented | 2286 | This ioctl allows to receive the value of a single register implemented |
2186 | in a vcpu. The register to read is indicated by the "id" field of the | 2287 | in a vcpu. The register to read is indicated by the "id" field of the |
@@ -2673,6 +2774,49 @@ Possible features: | |||
2673 | - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU. | 2774 | - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU. |
2674 | Depends on KVM_CAP_ARM_PMU_V3. | 2775 | Depends on KVM_CAP_ARM_PMU_V3. |
2675 | 2776 | ||
2777 | - KVM_ARM_VCPU_PTRAUTH_ADDRESS: Enables Address Pointer authentication | ||
2778 | for arm64 only. | ||
2779 | Depends on KVM_CAP_ARM_PTRAUTH_ADDRESS. | ||
2780 | If KVM_CAP_ARM_PTRAUTH_ADDRESS and KVM_CAP_ARM_PTRAUTH_GENERIC are | ||
2781 | both present, then both KVM_ARM_VCPU_PTRAUTH_ADDRESS and | ||
2782 | KVM_ARM_VCPU_PTRAUTH_GENERIC must be requested or neither must be | ||
2783 | requested. | ||
2784 | |||
2785 | - KVM_ARM_VCPU_PTRAUTH_GENERIC: Enables Generic Pointer authentication | ||
2786 | for arm64 only. | ||
2787 | Depends on KVM_CAP_ARM_PTRAUTH_GENERIC. | ||
2788 | If KVM_CAP_ARM_PTRAUTH_ADDRESS and KVM_CAP_ARM_PTRAUTH_GENERIC are | ||
2789 | both present, then both KVM_ARM_VCPU_PTRAUTH_ADDRESS and | ||
2790 | KVM_ARM_VCPU_PTRAUTH_GENERIC must be requested or neither must be | ||
2791 | requested. | ||
2792 | |||
2793 | - KVM_ARM_VCPU_SVE: Enables SVE for the CPU (arm64 only). | ||
2794 | Depends on KVM_CAP_ARM_SVE. | ||
2795 | Requires KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE): | ||
2796 | |||
2797 | * After KVM_ARM_VCPU_INIT: | ||
2798 | |||
2799 | - KVM_REG_ARM64_SVE_VLS may be read using KVM_GET_ONE_REG: the | ||
2800 | initial value of this pseudo-register indicates the best set of | ||
2801 | vector lengths possible for a vcpu on this host. | ||
2802 | |||
2803 | * Before KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE): | ||
2804 | |||
2805 | - KVM_RUN and KVM_GET_REG_LIST are not available; | ||
2806 | |||
2807 | - KVM_GET_ONE_REG and KVM_SET_ONE_REG cannot be used to access | ||
2808 | the scalable architectural SVE registers | ||
2809 | KVM_REG_ARM64_SVE_ZREG(), KVM_REG_ARM64_SVE_PREG() or | ||
2810 | KVM_REG_ARM64_SVE_FFR; | ||
2811 | |||
2812 | - KVM_REG_ARM64_SVE_VLS may optionally be written using | ||
2813 | KVM_SET_ONE_REG, to modify the set of vector lengths available | ||
2814 | for the vcpu. | ||
2815 | |||
2816 | * After KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE): | ||
2817 | |||
2818 | - the KVM_REG_ARM64_SVE_VLS pseudo-register is immutable, and can | ||
2819 | no longer be written using KVM_SET_ONE_REG. | ||
2676 | 2820 | ||
2677 | 4.83 KVM_ARM_PREFERRED_TARGET | 2821 | 4.83 KVM_ARM_PREFERRED_TARGET |
2678 | 2822 | ||
@@ -3887,6 +4031,40 @@ number of valid entries in the 'entries' array, which is then filled. | |||
3887 | 'index' and 'flags' fields in 'struct kvm_cpuid_entry2' are currently reserved, | 4031 | 'index' and 'flags' fields in 'struct kvm_cpuid_entry2' are currently reserved, |
3888 | userspace should not expect to get any particular value there. | 4032 | userspace should not expect to get any particular value there. |
3889 | 4033 | ||
4034 | 4.119 KVM_ARM_VCPU_FINALIZE | ||
4035 | |||
4036 | Architectures: arm, arm64 | ||
4037 | Type: vcpu ioctl | ||
4038 | Parameters: int feature (in) | ||
4039 | Returns: 0 on success, -1 on error | ||
4040 | Errors: | ||
4041 | EPERM: feature not enabled, needs configuration, or already finalized | ||
4042 | EINVAL: feature unknown or not present | ||
4043 | |||
4044 | Recognised values for feature: | ||
4045 | arm64 KVM_ARM_VCPU_SVE (requires KVM_CAP_ARM_SVE) | ||
4046 | |||
4047 | Finalizes the configuration of the specified vcpu feature. | ||
4048 | |||
4049 | The vcpu must already have been initialised, enabling the affected feature, by | ||
4050 | means of a successful KVM_ARM_VCPU_INIT call with the appropriate flag set in | ||
4051 | features[]. | ||
4052 | |||
4053 | For affected vcpu features, this is a mandatory step that must be performed | ||
4054 | before the vcpu is fully usable. | ||
4055 | |||
4056 | Between KVM_ARM_VCPU_INIT and KVM_ARM_VCPU_FINALIZE, the feature may be | ||
4057 | configured by use of ioctls such as KVM_SET_ONE_REG. The exact configuration | ||
4058 | that should be performed and how to do it are feature-dependent. | ||
4059 | |||
4060 | Other calls that depend on a particular feature being finalized, such as | ||
4061 | KVM_RUN, KVM_GET_REG_LIST, KVM_GET_ONE_REG and KVM_SET_ONE_REG, will fail with | ||
4062 | -EPERM unless the feature has already been finalized by means of a | ||
4063 | KVM_ARM_VCPU_FINALIZE call. | ||
4064 | |||
4065 | See KVM_ARM_VCPU_INIT for details of vcpu features that require finalization | ||
4066 | using this ioctl. | ||
4067 | |||
3890 | 5. The kvm_run structure | 4068 | 5. The kvm_run structure |
3891 | ------------------------ | 4069 | ------------------------ |
3892 | 4070 | ||
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 8927cae7c966..efb0e2c0d84c 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h | |||
@@ -343,4 +343,6 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, | |||
343 | } | 343 | } |
344 | } | 344 | } |
345 | 345 | ||
346 | static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {} | ||
347 | |||
346 | #endif /* __ARM_KVM_EMULATE_H__ */ | 348 | #endif /* __ARM_KVM_EMULATE_H__ */ |
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 770d73257ad9..075e1921fdd9 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #ifndef __ARM_KVM_HOST_H__ | 19 | #ifndef __ARM_KVM_HOST_H__ |
20 | #define __ARM_KVM_HOST_H__ | 20 | #define __ARM_KVM_HOST_H__ |
21 | 21 | ||
22 | #include <linux/errno.h> | ||
22 | #include <linux/types.h> | 23 | #include <linux/types.h> |
23 | #include <linux/kvm_types.h> | 24 | #include <linux/kvm_types.h> |
24 | #include <asm/cputype.h> | 25 | #include <asm/cputype.h> |
@@ -53,6 +54,8 @@ | |||
53 | 54 | ||
54 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); | 55 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); |
55 | 56 | ||
57 | static inline int kvm_arm_init_sve(void) { return 0; } | ||
58 | |||
56 | u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); | 59 | u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); |
57 | int __attribute_const__ kvm_target_cpu(void); | 60 | int __attribute_const__ kvm_target_cpu(void); |
58 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); | 61 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); |
@@ -150,9 +153,13 @@ struct kvm_cpu_context { | |||
150 | u32 cp15[NR_CP15_REGS]; | 153 | u32 cp15[NR_CP15_REGS]; |
151 | }; | 154 | }; |
152 | 155 | ||
153 | typedef struct kvm_cpu_context kvm_cpu_context_t; | 156 | struct kvm_host_data { |
157 | struct kvm_cpu_context host_ctxt; | ||
158 | }; | ||
159 | |||
160 | typedef struct kvm_host_data kvm_host_data_t; | ||
154 | 161 | ||
155 | static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt, | 162 | static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt, |
156 | int cpu) | 163 | int cpu) |
157 | { | 164 | { |
158 | /* The host's MPIDR is immutable, so let's set it up at boot time */ | 165 | /* The host's MPIDR is immutable, so let's set it up at boot time */ |
@@ -182,7 +189,7 @@ struct kvm_vcpu_arch { | |||
182 | struct kvm_vcpu_fault_info fault; | 189 | struct kvm_vcpu_fault_info fault; |
183 | 190 | ||
184 | /* Host FP context */ | 191 | /* Host FP context */ |
185 | kvm_cpu_context_t *host_cpu_context; | 192 | struct kvm_cpu_context *host_cpu_context; |
186 | 193 | ||
187 | /* VGIC state */ | 194 | /* VGIC state */ |
188 | struct vgic_cpu vgic_cpu; | 195 | struct vgic_cpu vgic_cpu; |
@@ -361,6 +368,9 @@ static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {} | |||
361 | static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {} | 368 | static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {} |
362 | static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {} | 369 | static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {} |
363 | 370 | ||
371 | static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {} | ||
372 | static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {} | ||
373 | |||
364 | static inline void kvm_arm_vhe_guest_enter(void) {} | 374 | static inline void kvm_arm_vhe_guest_enter(void) {} |
365 | static inline void kvm_arm_vhe_guest_exit(void) {} | 375 | static inline void kvm_arm_vhe_guest_exit(void) {} |
366 | 376 | ||
@@ -409,4 +419,14 @@ static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type) | |||
409 | return 0; | 419 | return 0; |
410 | } | 420 | } |
411 | 421 | ||
422 | static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) | ||
423 | { | ||
424 | return -EINVAL; | ||
425 | } | ||
426 | |||
427 | static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) | ||
428 | { | ||
429 | return true; | ||
430 | } | ||
431 | |||
412 | #endif /* __ARM_KVM_HOST_H__ */ | 432 | #endif /* __ARM_KVM_HOST_H__ */ |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7e34b9eba5de..39470784a50c 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -1288,6 +1288,7 @@ menu "ARMv8.3 architectural features" | |||
1288 | config ARM64_PTR_AUTH | 1288 | config ARM64_PTR_AUTH |
1289 | bool "Enable support for pointer authentication" | 1289 | bool "Enable support for pointer authentication" |
1290 | default y | 1290 | default y |
1291 | depends on !KVM || ARM64_VHE | ||
1291 | help | 1292 | help |
1292 | Pointer authentication (part of the ARMv8.3 Extensions) provides | 1293 | Pointer authentication (part of the ARMv8.3 Extensions) provides |
1293 | instructions for signing and authenticating pointers against secret | 1294 | instructions for signing and authenticating pointers against secret |
@@ -1301,8 +1302,9 @@ config ARM64_PTR_AUTH | |||
1301 | context-switched along with the process. | 1302 | context-switched along with the process. |
1302 | 1303 | ||
1303 | The feature is detected at runtime. If the feature is not present in | 1304 | The feature is detected at runtime. If the feature is not present in |
1304 | hardware it will not be advertised to userspace nor will it be | 1305 | hardware it will not be advertised to userspace/KVM guest nor will it |
1305 | enabled. | 1306 | be enabled. However, KVM guest also require VHE mode and hence |
1307 | CONFIG_ARM64_VHE=y option to use this feature. | ||
1306 | 1308 | ||
1307 | endmenu | 1309 | endmenu |
1308 | 1310 | ||
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index dd1ad3950ef5..df62bbd33a9a 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h | |||
@@ -24,10 +24,13 @@ | |||
24 | 24 | ||
25 | #ifndef __ASSEMBLY__ | 25 | #ifndef __ASSEMBLY__ |
26 | 26 | ||
27 | #include <linux/bitmap.h> | ||
27 | #include <linux/build_bug.h> | 28 | #include <linux/build_bug.h> |
29 | #include <linux/bug.h> | ||
28 | #include <linux/cache.h> | 30 | #include <linux/cache.h> |
29 | #include <linux/init.h> | 31 | #include <linux/init.h> |
30 | #include <linux/stddef.h> | 32 | #include <linux/stddef.h> |
33 | #include <linux/types.h> | ||
31 | 34 | ||
32 | #if defined(__KERNEL__) && defined(CONFIG_COMPAT) | 35 | #if defined(__KERNEL__) && defined(CONFIG_COMPAT) |
33 | /* Masks for extracting the FPSR and FPCR from the FPSCR */ | 36 | /* Masks for extracting the FPSR and FPCR from the FPSCR */ |
@@ -56,7 +59,8 @@ extern void fpsimd_restore_current_state(void); | |||
56 | extern void fpsimd_update_current_state(struct user_fpsimd_state const *state); | 59 | extern void fpsimd_update_current_state(struct user_fpsimd_state const *state); |
57 | 60 | ||
58 | extern void fpsimd_bind_task_to_cpu(void); | 61 | extern void fpsimd_bind_task_to_cpu(void); |
59 | extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state); | 62 | extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state, |
63 | void *sve_state, unsigned int sve_vl); | ||
60 | 64 | ||
61 | extern void fpsimd_flush_task_state(struct task_struct *target); | 65 | extern void fpsimd_flush_task_state(struct task_struct *target); |
62 | extern void fpsimd_flush_cpu_state(void); | 66 | extern void fpsimd_flush_cpu_state(void); |
@@ -87,6 +91,29 @@ extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused); | |||
87 | extern u64 read_zcr_features(void); | 91 | extern u64 read_zcr_features(void); |
88 | 92 | ||
89 | extern int __ro_after_init sve_max_vl; | 93 | extern int __ro_after_init sve_max_vl; |
94 | extern int __ro_after_init sve_max_virtualisable_vl; | ||
95 | extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); | ||
96 | |||
97 | /* | ||
98 | * Helpers to translate bit indices in sve_vq_map to VQ values (and | ||
99 | * vice versa). This allows find_next_bit() to be used to find the | ||
100 | * _maximum_ VQ not exceeding a certain value. | ||
101 | */ | ||
102 | static inline unsigned int __vq_to_bit(unsigned int vq) | ||
103 | { | ||
104 | return SVE_VQ_MAX - vq; | ||
105 | } | ||
106 | |||
107 | static inline unsigned int __bit_to_vq(unsigned int bit) | ||
108 | { | ||
109 | return SVE_VQ_MAX - bit; | ||
110 | } | ||
111 | |||
112 | /* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */ | ||
113 | static inline bool sve_vq_available(unsigned int vq) | ||
114 | { | ||
115 | return test_bit(__vq_to_bit(vq), sve_vq_map); | ||
116 | } | ||
90 | 117 | ||
91 | #ifdef CONFIG_ARM64_SVE | 118 | #ifdef CONFIG_ARM64_SVE |
92 | 119 | ||
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index f5b79e995f40..ff73f5462aca 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h | |||
@@ -108,7 +108,8 @@ extern u32 __kvm_get_mdcr_el2(void); | |||
108 | .endm | 108 | .endm |
109 | 109 | ||
110 | .macro get_host_ctxt reg, tmp | 110 | .macro get_host_ctxt reg, tmp |
111 | hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp | 111 | hyp_adr_this_cpu \reg, kvm_host_data, \tmp |
112 | add \reg, \reg, #HOST_DATA_CONTEXT | ||
112 | .endm | 113 | .endm |
113 | 114 | ||
114 | .macro get_vcpu_ptr vcpu, ctxt | 115 | .macro get_vcpu_ptr vcpu, ctxt |
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index d3842791e1c4..613427fafff9 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h | |||
@@ -98,6 +98,22 @@ static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu) | |||
98 | vcpu->arch.hcr_el2 |= HCR_TWE; | 98 | vcpu->arch.hcr_el2 |= HCR_TWE; |
99 | } | 99 | } |
100 | 100 | ||
101 | static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu) | ||
102 | { | ||
103 | vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK); | ||
104 | } | ||
105 | |||
106 | static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) | ||
107 | { | ||
108 | vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); | ||
109 | } | ||
110 | |||
111 | static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) | ||
112 | { | ||
113 | if (vcpu_has_ptrauth(vcpu)) | ||
114 | vcpu_ptrauth_disable(vcpu); | ||
115 | } | ||
116 | |||
101 | static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) | 117 | static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) |
102 | { | 118 | { |
103 | return vcpu->arch.vsesr_el2; | 119 | return vcpu->arch.vsesr_el2; |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a01fe087e022..2a8d3f8ca22c 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -22,9 +22,13 @@ | |||
22 | #ifndef __ARM64_KVM_HOST_H__ | 22 | #ifndef __ARM64_KVM_HOST_H__ |
23 | #define __ARM64_KVM_HOST_H__ | 23 | #define __ARM64_KVM_HOST_H__ |
24 | 24 | ||
25 | #include <linux/bitmap.h> | ||
25 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/jump_label.h> | ||
26 | #include <linux/kvm_types.h> | 28 | #include <linux/kvm_types.h> |
29 | #include <linux/percpu.h> | ||
27 | #include <asm/arch_gicv3.h> | 30 | #include <asm/arch_gicv3.h> |
31 | #include <asm/barrier.h> | ||
28 | #include <asm/cpufeature.h> | 32 | #include <asm/cpufeature.h> |
29 | #include <asm/daifflags.h> | 33 | #include <asm/daifflags.h> |
30 | #include <asm/fpsimd.h> | 34 | #include <asm/fpsimd.h> |
@@ -45,7 +49,7 @@ | |||
45 | 49 | ||
46 | #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS | 50 | #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS |
47 | 51 | ||
48 | #define KVM_VCPU_MAX_FEATURES 4 | 52 | #define KVM_VCPU_MAX_FEATURES 7 |
49 | 53 | ||
50 | #define KVM_REQ_SLEEP \ | 54 | #define KVM_REQ_SLEEP \ |
51 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) | 55 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
@@ -54,8 +58,12 @@ | |||
54 | 58 | ||
55 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); | 59 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); |
56 | 60 | ||
61 | extern unsigned int kvm_sve_max_vl; | ||
62 | int kvm_arm_init_sve(void); | ||
63 | |||
57 | int __attribute_const__ kvm_target_cpu(void); | 64 | int __attribute_const__ kvm_target_cpu(void); |
58 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); | 65 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu); |
66 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); | ||
59 | int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext); | 67 | int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext); |
60 | void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start); | 68 | void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start); |
61 | 69 | ||
@@ -117,6 +125,7 @@ enum vcpu_sysreg { | |||
117 | SCTLR_EL1, /* System Control Register */ | 125 | SCTLR_EL1, /* System Control Register */ |
118 | ACTLR_EL1, /* Auxiliary Control Register */ | 126 | ACTLR_EL1, /* Auxiliary Control Register */ |
119 | CPACR_EL1, /* Coprocessor Access Control */ | 127 | CPACR_EL1, /* Coprocessor Access Control */ |
128 | ZCR_EL1, /* SVE Control */ | ||
120 | TTBR0_EL1, /* Translation Table Base Register 0 */ | 129 | TTBR0_EL1, /* Translation Table Base Register 0 */ |
121 | TTBR1_EL1, /* Translation Table Base Register 1 */ | 130 | TTBR1_EL1, /* Translation Table Base Register 1 */ |
122 | TCR_EL1, /* Translation Control Register */ | 131 | TCR_EL1, /* Translation Control Register */ |
@@ -152,6 +161,18 @@ enum vcpu_sysreg { | |||
152 | PMSWINC_EL0, /* Software Increment Register */ | 161 | PMSWINC_EL0, /* Software Increment Register */ |
153 | PMUSERENR_EL0, /* User Enable Register */ | 162 | PMUSERENR_EL0, /* User Enable Register */ |
154 | 163 | ||
164 | /* Pointer Authentication Registers in a strict increasing order. */ | ||
165 | APIAKEYLO_EL1, | ||
166 | APIAKEYHI_EL1, | ||
167 | APIBKEYLO_EL1, | ||
168 | APIBKEYHI_EL1, | ||
169 | APDAKEYLO_EL1, | ||
170 | APDAKEYHI_EL1, | ||
171 | APDBKEYLO_EL1, | ||
172 | APDBKEYHI_EL1, | ||
173 | APGAKEYLO_EL1, | ||
174 | APGAKEYHI_EL1, | ||
175 | |||
155 | /* 32bit specific registers. Keep them at the end of the range */ | 176 | /* 32bit specific registers. Keep them at the end of the range */ |
156 | DACR32_EL2, /* Domain Access Control Register */ | 177 | DACR32_EL2, /* Domain Access Control Register */ |
157 | IFSR32_EL2, /* Instruction Fault Status Register */ | 178 | IFSR32_EL2, /* Instruction Fault Status Register */ |
@@ -212,7 +233,17 @@ struct kvm_cpu_context { | |||
212 | struct kvm_vcpu *__hyp_running_vcpu; | 233 | struct kvm_vcpu *__hyp_running_vcpu; |
213 | }; | 234 | }; |
214 | 235 | ||
215 | typedef struct kvm_cpu_context kvm_cpu_context_t; | 236 | struct kvm_pmu_events { |
237 | u32 events_host; | ||
238 | u32 events_guest; | ||
239 | }; | ||
240 | |||
241 | struct kvm_host_data { | ||
242 | struct kvm_cpu_context host_ctxt; | ||
243 | struct kvm_pmu_events pmu_events; | ||
244 | }; | ||
245 | |||
246 | typedef struct kvm_host_data kvm_host_data_t; | ||
216 | 247 | ||
217 | struct vcpu_reset_state { | 248 | struct vcpu_reset_state { |
218 | unsigned long pc; | 249 | unsigned long pc; |
@@ -223,6 +254,8 @@ struct vcpu_reset_state { | |||
223 | 254 | ||
224 | struct kvm_vcpu_arch { | 255 | struct kvm_vcpu_arch { |
225 | struct kvm_cpu_context ctxt; | 256 | struct kvm_cpu_context ctxt; |
257 | void *sve_state; | ||
258 | unsigned int sve_max_vl; | ||
226 | 259 | ||
227 | /* HYP configuration */ | 260 | /* HYP configuration */ |
228 | u64 hcr_el2; | 261 | u64 hcr_el2; |
@@ -255,7 +288,7 @@ struct kvm_vcpu_arch { | |||
255 | struct kvm_guest_debug_arch external_debug_state; | 288 | struct kvm_guest_debug_arch external_debug_state; |
256 | 289 | ||
257 | /* Pointer to host CPU context */ | 290 | /* Pointer to host CPU context */ |
258 | kvm_cpu_context_t *host_cpu_context; | 291 | struct kvm_cpu_context *host_cpu_context; |
259 | 292 | ||
260 | struct thread_info *host_thread_info; /* hyp VA */ | 293 | struct thread_info *host_thread_info; /* hyp VA */ |
261 | struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ | 294 | struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ |
@@ -318,12 +351,40 @@ struct kvm_vcpu_arch { | |||
318 | bool sysregs_loaded_on_cpu; | 351 | bool sysregs_loaded_on_cpu; |
319 | }; | 352 | }; |
320 | 353 | ||
354 | /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */ | ||
355 | #define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \ | ||
356 | sve_ffr_offset((vcpu)->arch.sve_max_vl))) | ||
357 | |||
358 | #define vcpu_sve_state_size(vcpu) ({ \ | ||
359 | size_t __size_ret; \ | ||
360 | unsigned int __vcpu_vq; \ | ||
361 | \ | ||
362 | if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \ | ||
363 | __size_ret = 0; \ | ||
364 | } else { \ | ||
365 | __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl); \ | ||
366 | __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \ | ||
367 | } \ | ||
368 | \ | ||
369 | __size_ret; \ | ||
370 | }) | ||
371 | |||
321 | /* vcpu_arch flags field values: */ | 372 | /* vcpu_arch flags field values: */ |
322 | #define KVM_ARM64_DEBUG_DIRTY (1 << 0) | 373 | #define KVM_ARM64_DEBUG_DIRTY (1 << 0) |
323 | #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ | 374 | #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ |
324 | #define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */ | 375 | #define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */ |
325 | #define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */ | 376 | #define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */ |
326 | #define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */ | 377 | #define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */ |
378 | #define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */ | ||
379 | #define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */ | ||
380 | #define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */ | ||
381 | |||
382 | #define vcpu_has_sve(vcpu) (system_supports_sve() && \ | ||
383 | ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE)) | ||
384 | |||
385 | #define vcpu_has_ptrauth(vcpu) ((system_supports_address_auth() || \ | ||
386 | system_supports_generic_auth()) && \ | ||
387 | ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)) | ||
327 | 388 | ||
328 | #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) | 389 | #define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs) |
329 | 390 | ||
@@ -432,9 +493,9 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome); | |||
432 | 493 | ||
433 | struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); | 494 | struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr); |
434 | 495 | ||
435 | DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); | 496 | DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data); |
436 | 497 | ||
437 | static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt, | 498 | static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt, |
438 | int cpu) | 499 | int cpu) |
439 | { | 500 | { |
440 | /* The host's MPIDR is immutable, so let's set it up at boot time */ | 501 | /* The host's MPIDR is immutable, so let's set it up at boot time */ |
@@ -452,8 +513,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr, | |||
452 | * kernel's mapping to the linear mapping, and store it in tpidr_el2 | 513 | * kernel's mapping to the linear mapping, and store it in tpidr_el2 |
453 | * so that we can use adr_l to access per-cpu variables in EL2. | 514 | * so that we can use adr_l to access per-cpu variables in EL2. |
454 | */ | 515 | */ |
455 | u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_cpu_state) - | 516 | u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) - |
456 | (u64)kvm_ksym_ref(kvm_host_cpu_state)); | 517 | (u64)kvm_ksym_ref(kvm_host_data)); |
457 | 518 | ||
458 | /* | 519 | /* |
459 | * Call initialization code, and switch to the full blown HYP code. | 520 | * Call initialization code, and switch to the full blown HYP code. |
@@ -491,9 +552,10 @@ static inline bool kvm_arch_requires_vhe(void) | |||
491 | return false; | 552 | return false; |
492 | } | 553 | } |
493 | 554 | ||
555 | void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu); | ||
556 | |||
494 | static inline void kvm_arch_hardware_unsetup(void) {} | 557 | static inline void kvm_arch_hardware_unsetup(void) {} |
495 | static inline void kvm_arch_sync_events(struct kvm *kvm) {} | 558 | static inline void kvm_arch_sync_events(struct kvm *kvm) {} |
496 | static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} | ||
497 | static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} | 559 | static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} |
498 | static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} | 560 | static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} |
499 | 561 | ||
@@ -516,11 +578,28 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); | |||
516 | void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); | 578 | void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); |
517 | void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); | 579 | void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); |
518 | 580 | ||
581 | static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) | ||
582 | { | ||
583 | return (!has_vhe() && attr->exclude_host); | ||
584 | } | ||
585 | |||
519 | #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */ | 586 | #ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */ |
520 | static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) | 587 | static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) |
521 | { | 588 | { |
522 | return kvm_arch_vcpu_run_map_fp(vcpu); | 589 | return kvm_arch_vcpu_run_map_fp(vcpu); |
523 | } | 590 | } |
591 | |||
592 | void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr); | ||
593 | void kvm_clr_pmu_events(u32 clr); | ||
594 | |||
595 | void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt); | ||
596 | bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt); | ||
597 | |||
598 | void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu); | ||
599 | void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu); | ||
600 | #else | ||
601 | static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {} | ||
602 | static inline void kvm_clr_pmu_events(u32 clr) {} | ||
524 | #endif | 603 | #endif |
525 | 604 | ||
526 | static inline void kvm_arm_vhe_guest_enter(void) | 605 | static inline void kvm_arm_vhe_guest_enter(void) |
@@ -594,4 +673,10 @@ void kvm_arch_free_vm(struct kvm *kvm); | |||
594 | 673 | ||
595 | int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type); | 674 | int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type); |
596 | 675 | ||
676 | int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature); | ||
677 | bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); | ||
678 | |||
679 | #define kvm_arm_vcpu_sve_finalized(vcpu) \ | ||
680 | ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED) | ||
681 | |||
597 | #endif /* __ARM64_KVM_HOST_H__ */ | 682 | #endif /* __ARM64_KVM_HOST_H__ */ |
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 4da765f2cca5..ef8b8394d3d1 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h | |||
@@ -149,7 +149,6 @@ void __debug_switch_to_host(struct kvm_vcpu *vcpu); | |||
149 | 149 | ||
150 | void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); | 150 | void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); |
151 | void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); | 151 | void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); |
152 | bool __fpsimd_enabled(void); | ||
153 | 152 | ||
154 | void activate_traps_vhe_load(struct kvm_vcpu *vcpu); | 153 | void activate_traps_vhe_load(struct kvm_vcpu *vcpu); |
155 | void deactivate_traps_vhe_put(void); | 154 | void deactivate_traps_vhe_put(void); |
diff --git a/arch/arm64/include/asm/kvm_ptrauth.h b/arch/arm64/include/asm/kvm_ptrauth.h new file mode 100644 index 000000000000..6301813dcace --- /dev/null +++ b/arch/arm64/include/asm/kvm_ptrauth.h | |||
@@ -0,0 +1,111 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* arch/arm64/include/asm/kvm_ptrauth.h: Guest/host ptrauth save/restore | ||
3 | * Copyright 2019 Arm Limited | ||
4 | * Authors: Mark Rutland <mark.rutland@arm.com> | ||
5 | * Amit Daniel Kachhap <amit.kachhap@arm.com> | ||
6 | */ | ||
7 | |||
8 | #ifndef __ASM_KVM_PTRAUTH_H | ||
9 | #define __ASM_KVM_PTRAUTH_H | ||
10 | |||
11 | #ifdef __ASSEMBLY__ | ||
12 | |||
13 | #include <asm/sysreg.h> | ||
14 | |||
15 | #ifdef CONFIG_ARM64_PTR_AUTH | ||
16 | |||
17 | #define PTRAUTH_REG_OFFSET(x) (x - CPU_APIAKEYLO_EL1) | ||
18 | |||
19 | /* | ||
20 | * CPU_AP*_EL1 values exceed immediate offset range (512) for stp | ||
21 | * instruction so below macros takes CPU_APIAKEYLO_EL1 as base and | ||
22 | * calculates the offset of the keys from this base to avoid an extra add | ||
23 | * instruction. These macros assumes the keys offsets follow the order of | ||
24 | * the sysreg enum in kvm_host.h. | ||
25 | */ | ||
26 | .macro ptrauth_save_state base, reg1, reg2 | ||
27 | mrs_s \reg1, SYS_APIAKEYLO_EL1 | ||
28 | mrs_s \reg2, SYS_APIAKEYHI_EL1 | ||
29 | stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)] | ||
30 | mrs_s \reg1, SYS_APIBKEYLO_EL1 | ||
31 | mrs_s \reg2, SYS_APIBKEYHI_EL1 | ||
32 | stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)] | ||
33 | mrs_s \reg1, SYS_APDAKEYLO_EL1 | ||
34 | mrs_s \reg2, SYS_APDAKEYHI_EL1 | ||
35 | stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)] | ||
36 | mrs_s \reg1, SYS_APDBKEYLO_EL1 | ||
37 | mrs_s \reg2, SYS_APDBKEYHI_EL1 | ||
38 | stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)] | ||
39 | mrs_s \reg1, SYS_APGAKEYLO_EL1 | ||
40 | mrs_s \reg2, SYS_APGAKEYHI_EL1 | ||
41 | stp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)] | ||
42 | .endm | ||
43 | |||
44 | .macro ptrauth_restore_state base, reg1, reg2 | ||
45 | ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIAKEYLO_EL1)] | ||
46 | msr_s SYS_APIAKEYLO_EL1, \reg1 | ||
47 | msr_s SYS_APIAKEYHI_EL1, \reg2 | ||
48 | ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APIBKEYLO_EL1)] | ||
49 | msr_s SYS_APIBKEYLO_EL1, \reg1 | ||
50 | msr_s SYS_APIBKEYHI_EL1, \reg2 | ||
51 | ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDAKEYLO_EL1)] | ||
52 | msr_s SYS_APDAKEYLO_EL1, \reg1 | ||
53 | msr_s SYS_APDAKEYHI_EL1, \reg2 | ||
54 | ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APDBKEYLO_EL1)] | ||
55 | msr_s SYS_APDBKEYLO_EL1, \reg1 | ||
56 | msr_s SYS_APDBKEYHI_EL1, \reg2 | ||
57 | ldp \reg1, \reg2, [\base, #PTRAUTH_REG_OFFSET(CPU_APGAKEYLO_EL1)] | ||
58 | msr_s SYS_APGAKEYLO_EL1, \reg1 | ||
59 | msr_s SYS_APGAKEYHI_EL1, \reg2 | ||
60 | .endm | ||
61 | |||
62 | /* | ||
63 | * Both ptrauth_switch_to_guest and ptrauth_switch_to_host macros will | ||
64 | * check for the presence of one of the cpufeature flag | ||
65 | * ARM64_HAS_ADDRESS_AUTH_ARCH or ARM64_HAS_ADDRESS_AUTH_IMP_DEF and | ||
66 | * then proceed ahead with the save/restore of Pointer Authentication | ||
67 | * key registers. | ||
68 | */ | ||
69 | .macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3 | ||
70 | alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH | ||
71 | b 1000f | ||
72 | alternative_else_nop_endif | ||
73 | alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF | ||
74 | b 1001f | ||
75 | alternative_else_nop_endif | ||
76 | 1000: | ||
77 | ldr \reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)] | ||
78 | and \reg1, \reg1, #(HCR_API | HCR_APK) | ||
79 | cbz \reg1, 1001f | ||
80 | add \reg1, \g_ctxt, #CPU_APIAKEYLO_EL1 | ||
81 | ptrauth_restore_state \reg1, \reg2, \reg3 | ||
82 | 1001: | ||
83 | .endm | ||
84 | |||
85 | .macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3 | ||
86 | alternative_if ARM64_HAS_ADDRESS_AUTH_ARCH | ||
87 | b 2000f | ||
88 | alternative_else_nop_endif | ||
89 | alternative_if_not ARM64_HAS_ADDRESS_AUTH_IMP_DEF | ||
90 | b 2001f | ||
91 | alternative_else_nop_endif | ||
92 | 2000: | ||
93 | ldr \reg1, [\g_ctxt, #(VCPU_HCR_EL2 - VCPU_CONTEXT)] | ||
94 | and \reg1, \reg1, #(HCR_API | HCR_APK) | ||
95 | cbz \reg1, 2001f | ||
96 | add \reg1, \g_ctxt, #CPU_APIAKEYLO_EL1 | ||
97 | ptrauth_save_state \reg1, \reg2, \reg3 | ||
98 | add \reg1, \h_ctxt, #CPU_APIAKEYLO_EL1 | ||
99 | ptrauth_restore_state \reg1, \reg2, \reg3 | ||
100 | isb | ||
101 | 2001: | ||
102 | .endm | ||
103 | |||
104 | #else /* !CONFIG_ARM64_PTR_AUTH */ | ||
105 | .macro ptrauth_switch_to_guest g_ctxt, reg1, reg2, reg3 | ||
106 | .endm | ||
107 | .macro ptrauth_switch_to_host g_ctxt, h_ctxt, reg1, reg2, reg3 | ||
108 | .endm | ||
109 | #endif /* CONFIG_ARM64_PTR_AUTH */ | ||
110 | #endif /* __ASSEMBLY__ */ | ||
111 | #endif /* __ASM_KVM_PTRAUTH_H */ | ||
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 5b267dec6194..4d6262df79bb 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h | |||
@@ -454,6 +454,9 @@ | |||
454 | #define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6) | 454 | #define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6) |
455 | #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7) | 455 | #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7) |
456 | 456 | ||
457 | /* VHE encodings for architectural EL0/1 system registers */ | ||
458 | #define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) | ||
459 | |||
457 | /* Common SCTLR_ELx flags. */ | 460 | /* Common SCTLR_ELx flags. */ |
458 | #define SCTLR_ELx_DSSBS (_BITUL(44)) | 461 | #define SCTLR_ELx_DSSBS (_BITUL(44)) |
459 | #define SCTLR_ELx_ENIA (_BITUL(31)) | 462 | #define SCTLR_ELx_ENIA (_BITUL(31)) |
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 97c3478ee6e7..7b7ac0f6cec9 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/psci.h> | 35 | #include <linux/psci.h> |
36 | #include <linux/types.h> | 36 | #include <linux/types.h> |
37 | #include <asm/ptrace.h> | 37 | #include <asm/ptrace.h> |
38 | #include <asm/sve_context.h> | ||
38 | 39 | ||
39 | #define __KVM_HAVE_GUEST_DEBUG | 40 | #define __KVM_HAVE_GUEST_DEBUG |
40 | #define __KVM_HAVE_IRQ_LINE | 41 | #define __KVM_HAVE_IRQ_LINE |
@@ -102,6 +103,9 @@ struct kvm_regs { | |||
102 | #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ | 103 | #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ |
103 | #define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */ | 104 | #define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */ |
104 | #define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */ | 105 | #define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */ |
106 | #define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */ | ||
107 | #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ | ||
108 | #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ | ||
105 | 109 | ||
106 | struct kvm_vcpu_init { | 110 | struct kvm_vcpu_init { |
107 | __u32 target; | 111 | __u32 target; |
@@ -226,6 +230,45 @@ struct kvm_vcpu_events { | |||
226 | KVM_REG_ARM_FW | ((r) & 0xffff)) | 230 | KVM_REG_ARM_FW | ((r) & 0xffff)) |
227 | #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) | 231 | #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) |
228 | 232 | ||
233 | /* SVE registers */ | ||
234 | #define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT) | ||
235 | |||
236 | /* Z- and P-regs occupy blocks at the following offsets within this range: */ | ||
237 | #define KVM_REG_ARM64_SVE_ZREG_BASE 0 | ||
238 | #define KVM_REG_ARM64_SVE_PREG_BASE 0x400 | ||
239 | #define KVM_REG_ARM64_SVE_FFR_BASE 0x600 | ||
240 | |||
241 | #define KVM_ARM64_SVE_NUM_ZREGS __SVE_NUM_ZREGS | ||
242 | #define KVM_ARM64_SVE_NUM_PREGS __SVE_NUM_PREGS | ||
243 | |||
244 | #define KVM_ARM64_SVE_MAX_SLICES 32 | ||
245 | |||
246 | #define KVM_REG_ARM64_SVE_ZREG(n, i) \ | ||
247 | (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_ZREG_BASE | \ | ||
248 | KVM_REG_SIZE_U2048 | \ | ||
249 | (((n) & (KVM_ARM64_SVE_NUM_ZREGS - 1)) << 5) | \ | ||
250 | ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1))) | ||
251 | |||
252 | #define KVM_REG_ARM64_SVE_PREG(n, i) \ | ||
253 | (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_PREG_BASE | \ | ||
254 | KVM_REG_SIZE_U256 | \ | ||
255 | (((n) & (KVM_ARM64_SVE_NUM_PREGS - 1)) << 5) | \ | ||
256 | ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1))) | ||
257 | |||
258 | #define KVM_REG_ARM64_SVE_FFR(i) \ | ||
259 | (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_FFR_BASE | \ | ||
260 | KVM_REG_SIZE_U256 | \ | ||
261 | ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1))) | ||
262 | |||
263 | #define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN | ||
264 | #define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX | ||
265 | |||
266 | /* Vector lengths pseudo-register: */ | ||
267 | #define KVM_REG_ARM64_SVE_VLS (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \ | ||
268 | KVM_REG_SIZE_U512 | 0xffff) | ||
269 | #define KVM_ARM64_SVE_VLS_WORDS \ | ||
270 | ((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1) | ||
271 | |||
229 | /* Device Control API: ARM VGIC */ | 272 | /* Device Control API: ARM VGIC */ |
230 | #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 | 273 | #define KVM_DEV_ARM_VGIC_GRP_ADDR 0 |
231 | #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 | 274 | #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 |
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index 7f40dcbdd51d..768b23101ff0 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c | |||
@@ -125,9 +125,16 @@ int main(void) | |||
125 | DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); | 125 | DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); |
126 | DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1)); | 126 | DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1)); |
127 | DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags)); | 127 | DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags)); |
128 | DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); | ||
128 | DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); | 129 | DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); |
130 | DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1])); | ||
131 | DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1])); | ||
132 | DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1])); | ||
133 | DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1])); | ||
134 | DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1])); | ||
129 | DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); | 135 | DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs)); |
130 | DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); | 136 | DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu)); |
137 | DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt)); | ||
131 | #endif | 138 | #endif |
132 | #ifdef CONFIG_CPU_PM | 139 | #ifdef CONFIG_CPU_PM |
133 | DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); | 140 | DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)); |
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 4061de10cea6..7f8cc51f0740 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
@@ -1863,7 +1863,7 @@ static void verify_sve_features(void) | |||
1863 | unsigned int len = zcr & ZCR_ELx_LEN_MASK; | 1863 | unsigned int len = zcr & ZCR_ELx_LEN_MASK; |
1864 | 1864 | ||
1865 | if (len < safe_len || sve_verify_vq_map()) { | 1865 | if (len < safe_len || sve_verify_vq_map()) { |
1866 | pr_crit("CPU%d: SVE: required vector length(s) missing\n", | 1866 | pr_crit("CPU%d: SVE: vector length support mismatch\n", |
1867 | smp_processor_id()); | 1867 | smp_processor_id()); |
1868 | cpu_die_early(); | 1868 | cpu_die_early(); |
1869 | } | 1869 | } |
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 5ebe73b69961..56afa40263d9 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/bitmap.h> | 20 | #include <linux/bitmap.h> |
21 | #include <linux/bitops.h> | ||
21 | #include <linux/bottom_half.h> | 22 | #include <linux/bottom_half.h> |
22 | #include <linux/bug.h> | 23 | #include <linux/bug.h> |
23 | #include <linux/cache.h> | 24 | #include <linux/cache.h> |
@@ -48,6 +49,7 @@ | |||
48 | #include <asm/sigcontext.h> | 49 | #include <asm/sigcontext.h> |
49 | #include <asm/sysreg.h> | 50 | #include <asm/sysreg.h> |
50 | #include <asm/traps.h> | 51 | #include <asm/traps.h> |
52 | #include <asm/virt.h> | ||
51 | 53 | ||
52 | #define FPEXC_IOF (1 << 0) | 54 | #define FPEXC_IOF (1 << 0) |
53 | #define FPEXC_DZF (1 << 1) | 55 | #define FPEXC_DZF (1 << 1) |
@@ -119,6 +121,8 @@ | |||
119 | */ | 121 | */ |
120 | struct fpsimd_last_state_struct { | 122 | struct fpsimd_last_state_struct { |
121 | struct user_fpsimd_state *st; | 123 | struct user_fpsimd_state *st; |
124 | void *sve_state; | ||
125 | unsigned int sve_vl; | ||
122 | }; | 126 | }; |
123 | 127 | ||
124 | static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state); | 128 | static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state); |
@@ -130,14 +134,23 @@ static int sve_default_vl = -1; | |||
130 | 134 | ||
131 | /* Maximum supported vector length across all CPUs (initially poisoned) */ | 135 | /* Maximum supported vector length across all CPUs (initially poisoned) */ |
132 | int __ro_after_init sve_max_vl = SVE_VL_MIN; | 136 | int __ro_after_init sve_max_vl = SVE_VL_MIN; |
133 | /* Set of available vector lengths, as vq_to_bit(vq): */ | 137 | int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN; |
134 | static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); | 138 | |
139 | /* | ||
140 | * Set of available vector lengths, | ||
141 | * where length vq encoded as bit __vq_to_bit(vq): | ||
142 | */ | ||
143 | __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); | ||
144 | /* Set of vector lengths present on at least one cpu: */ | ||
145 | static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX); | ||
146 | |||
135 | static void __percpu *efi_sve_state; | 147 | static void __percpu *efi_sve_state; |
136 | 148 | ||
137 | #else /* ! CONFIG_ARM64_SVE */ | 149 | #else /* ! CONFIG_ARM64_SVE */ |
138 | 150 | ||
139 | /* Dummy declaration for code that will be optimised out: */ | 151 | /* Dummy declaration for code that will be optimised out: */ |
140 | extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); | 152 | extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); |
153 | extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX); | ||
141 | extern void __percpu *efi_sve_state; | 154 | extern void __percpu *efi_sve_state; |
142 | 155 | ||
143 | #endif /* ! CONFIG_ARM64_SVE */ | 156 | #endif /* ! CONFIG_ARM64_SVE */ |
@@ -235,14 +248,15 @@ static void task_fpsimd_load(void) | |||
235 | */ | 248 | */ |
236 | void fpsimd_save(void) | 249 | void fpsimd_save(void) |
237 | { | 250 | { |
238 | struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st); | 251 | struct fpsimd_last_state_struct const *last = |
252 | this_cpu_ptr(&fpsimd_last_state); | ||
239 | /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ | 253 | /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ |
240 | 254 | ||
241 | WARN_ON(!in_softirq() && !irqs_disabled()); | 255 | WARN_ON(!in_softirq() && !irqs_disabled()); |
242 | 256 | ||
243 | if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { | 257 | if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { |
244 | if (system_supports_sve() && test_thread_flag(TIF_SVE)) { | 258 | if (system_supports_sve() && test_thread_flag(TIF_SVE)) { |
245 | if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) { | 259 | if (WARN_ON(sve_get_vl() != last->sve_vl)) { |
246 | /* | 260 | /* |
247 | * Can't save the user regs, so current would | 261 | * Can't save the user regs, so current would |
248 | * re-enter user with corrupt state. | 262 | * re-enter user with corrupt state. |
@@ -252,32 +266,15 @@ void fpsimd_save(void) | |||
252 | return; | 266 | return; |
253 | } | 267 | } |
254 | 268 | ||
255 | sve_save_state(sve_pffr(¤t->thread), &st->fpsr); | 269 | sve_save_state((char *)last->sve_state + |
270 | sve_ffr_offset(last->sve_vl), | ||
271 | &last->st->fpsr); | ||
256 | } else | 272 | } else |
257 | fpsimd_save_state(st); | 273 | fpsimd_save_state(last->st); |
258 | } | 274 | } |
259 | } | 275 | } |
260 | 276 | ||
261 | /* | 277 | /* |
262 | * Helpers to translate bit indices in sve_vq_map to VQ values (and | ||
263 | * vice versa). This allows find_next_bit() to be used to find the | ||
264 | * _maximum_ VQ not exceeding a certain value. | ||
265 | */ | ||
266 | |||
267 | static unsigned int vq_to_bit(unsigned int vq) | ||
268 | { | ||
269 | return SVE_VQ_MAX - vq; | ||
270 | } | ||
271 | |||
272 | static unsigned int bit_to_vq(unsigned int bit) | ||
273 | { | ||
274 | if (WARN_ON(bit >= SVE_VQ_MAX)) | ||
275 | bit = SVE_VQ_MAX - 1; | ||
276 | |||
277 | return SVE_VQ_MAX - bit; | ||
278 | } | ||
279 | |||
280 | /* | ||
281 | * All vector length selection from userspace comes through here. | 278 | * All vector length selection from userspace comes through here. |
282 | * We're on a slow path, so some sanity-checks are included. | 279 | * We're on a slow path, so some sanity-checks are included. |
283 | * If things go wrong there's a bug somewhere, but try to fall back to a | 280 | * If things go wrong there's a bug somewhere, but try to fall back to a |
@@ -298,8 +295,8 @@ static unsigned int find_supported_vector_length(unsigned int vl) | |||
298 | vl = max_vl; | 295 | vl = max_vl; |
299 | 296 | ||
300 | bit = find_next_bit(sve_vq_map, SVE_VQ_MAX, | 297 | bit = find_next_bit(sve_vq_map, SVE_VQ_MAX, |
301 | vq_to_bit(sve_vq_from_vl(vl))); | 298 | __vq_to_bit(sve_vq_from_vl(vl))); |
302 | return sve_vl_from_vq(bit_to_vq(bit)); | 299 | return sve_vl_from_vq(__bit_to_vq(bit)); |
303 | } | 300 | } |
304 | 301 | ||
305 | #ifdef CONFIG_SYSCTL | 302 | #ifdef CONFIG_SYSCTL |
@@ -550,7 +547,6 @@ int sve_set_vector_length(struct task_struct *task, | |||
550 | local_bh_disable(); | 547 | local_bh_disable(); |
551 | 548 | ||
552 | fpsimd_save(); | 549 | fpsimd_save(); |
553 | set_thread_flag(TIF_FOREIGN_FPSTATE); | ||
554 | } | 550 | } |
555 | 551 | ||
556 | fpsimd_flush_task_state(task); | 552 | fpsimd_flush_task_state(task); |
@@ -624,12 +620,6 @@ int sve_get_current_vl(void) | |||
624 | return sve_prctl_status(0); | 620 | return sve_prctl_status(0); |
625 | } | 621 | } |
626 | 622 | ||
627 | /* | ||
628 | * Bitmap for temporary storage of the per-CPU set of supported vector lengths | ||
629 | * during secondary boot. | ||
630 | */ | ||
631 | static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX); | ||
632 | |||
633 | static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX)) | 623 | static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX)) |
634 | { | 624 | { |
635 | unsigned int vq, vl; | 625 | unsigned int vq, vl; |
@@ -644,40 +634,82 @@ static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX)) | |||
644 | write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */ | 634 | write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */ |
645 | vl = sve_get_vl(); | 635 | vl = sve_get_vl(); |
646 | vq = sve_vq_from_vl(vl); /* skip intervening lengths */ | 636 | vq = sve_vq_from_vl(vl); /* skip intervening lengths */ |
647 | set_bit(vq_to_bit(vq), map); | 637 | set_bit(__vq_to_bit(vq), map); |
648 | } | 638 | } |
649 | } | 639 | } |
650 | 640 | ||
641 | /* | ||
642 | * Initialise the set of known supported VQs for the boot CPU. | ||
643 | * This is called during kernel boot, before secondary CPUs are brought up. | ||
644 | */ | ||
651 | void __init sve_init_vq_map(void) | 645 | void __init sve_init_vq_map(void) |
652 | { | 646 | { |
653 | sve_probe_vqs(sve_vq_map); | 647 | sve_probe_vqs(sve_vq_map); |
648 | bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX); | ||
654 | } | 649 | } |
655 | 650 | ||
656 | /* | 651 | /* |
657 | * If we haven't committed to the set of supported VQs yet, filter out | 652 | * If we haven't committed to the set of supported VQs yet, filter out |
658 | * those not supported by the current CPU. | 653 | * those not supported by the current CPU. |
654 | * This function is called during the bring-up of early secondary CPUs only. | ||
659 | */ | 655 | */ |
660 | void sve_update_vq_map(void) | 656 | void sve_update_vq_map(void) |
661 | { | 657 | { |
662 | sve_probe_vqs(sve_secondary_vq_map); | 658 | DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); |
663 | bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX); | 659 | |
660 | sve_probe_vqs(tmp_map); | ||
661 | bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX); | ||
662 | bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX); | ||
664 | } | 663 | } |
665 | 664 | ||
666 | /* Check whether the current CPU supports all VQs in the committed set */ | 665 | /* |
666 | * Check whether the current CPU supports all VQs in the committed set. | ||
667 | * This function is called during the bring-up of late secondary CPUs only. | ||
668 | */ | ||
667 | int sve_verify_vq_map(void) | 669 | int sve_verify_vq_map(void) |
668 | { | 670 | { |
669 | int ret = 0; | 671 | DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); |
672 | unsigned long b; | ||
670 | 673 | ||
671 | sve_probe_vqs(sve_secondary_vq_map); | 674 | sve_probe_vqs(tmp_map); |
672 | bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map, | 675 | |
673 | SVE_VQ_MAX); | 676 | bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX); |
674 | if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) { | 677 | if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) { |
675 | pr_warn("SVE: cpu%d: Required vector length(s) missing\n", | 678 | pr_warn("SVE: cpu%d: Required vector length(s) missing\n", |
676 | smp_processor_id()); | 679 | smp_processor_id()); |
677 | ret = -EINVAL; | 680 | return -EINVAL; |
678 | } | 681 | } |
679 | 682 | ||
680 | return ret; | 683 | if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available()) |
684 | return 0; | ||
685 | |||
686 | /* | ||
687 | * For KVM, it is necessary to ensure that this CPU doesn't | ||
688 | * support any vector length that guests may have probed as | ||
689 | * unsupported. | ||
690 | */ | ||
691 | |||
692 | /* Recover the set of supported VQs: */ | ||
693 | bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX); | ||
694 | /* Find VQs supported that are not globally supported: */ | ||
695 | bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX); | ||
696 | |||
697 | /* Find the lowest such VQ, if any: */ | ||
698 | b = find_last_bit(tmp_map, SVE_VQ_MAX); | ||
699 | if (b >= SVE_VQ_MAX) | ||
700 | return 0; /* no mismatches */ | ||
701 | |||
702 | /* | ||
703 | * Mismatches above sve_max_virtualisable_vl are fine, since | ||
704 | * no guest is allowed to configure ZCR_EL2.LEN to exceed this: | ||
705 | */ | ||
706 | if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) { | ||
707 | pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n", | ||
708 | smp_processor_id()); | ||
709 | return -EINVAL; | ||
710 | } | ||
711 | |||
712 | return 0; | ||
681 | } | 713 | } |
682 | 714 | ||
683 | static void __init sve_efi_setup(void) | 715 | static void __init sve_efi_setup(void) |
@@ -744,6 +776,8 @@ u64 read_zcr_features(void) | |||
744 | void __init sve_setup(void) | 776 | void __init sve_setup(void) |
745 | { | 777 | { |
746 | u64 zcr; | 778 | u64 zcr; |
779 | DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); | ||
780 | unsigned long b; | ||
747 | 781 | ||
748 | if (!system_supports_sve()) | 782 | if (!system_supports_sve()) |
749 | return; | 783 | return; |
@@ -753,8 +787,8 @@ void __init sve_setup(void) | |||
753 | * so sve_vq_map must have at least SVE_VQ_MIN set. | 787 | * so sve_vq_map must have at least SVE_VQ_MIN set. |
754 | * If something went wrong, at least try to patch it up: | 788 | * If something went wrong, at least try to patch it up: |
755 | */ | 789 | */ |
756 | if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map))) | 790 | if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map))) |
757 | set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map); | 791 | set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map); |
758 | 792 | ||
759 | zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1); | 793 | zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1); |
760 | sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1); | 794 | sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1); |
@@ -772,11 +806,31 @@ void __init sve_setup(void) | |||
772 | */ | 806 | */ |
773 | sve_default_vl = find_supported_vector_length(64); | 807 | sve_default_vl = find_supported_vector_length(64); |
774 | 808 | ||
809 | bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map, | ||
810 | SVE_VQ_MAX); | ||
811 | |||
812 | b = find_last_bit(tmp_map, SVE_VQ_MAX); | ||
813 | if (b >= SVE_VQ_MAX) | ||
814 | /* No non-virtualisable VLs found */ | ||
815 | sve_max_virtualisable_vl = SVE_VQ_MAX; | ||
816 | else if (WARN_ON(b == SVE_VQ_MAX - 1)) | ||
817 | /* No virtualisable VLs? This is architecturally forbidden. */ | ||
818 | sve_max_virtualisable_vl = SVE_VQ_MIN; | ||
819 | else /* b + 1 < SVE_VQ_MAX */ | ||
820 | sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1)); | ||
821 | |||
822 | if (sve_max_virtualisable_vl > sve_max_vl) | ||
823 | sve_max_virtualisable_vl = sve_max_vl; | ||
824 | |||
775 | pr_info("SVE: maximum available vector length %u bytes per vector\n", | 825 | pr_info("SVE: maximum available vector length %u bytes per vector\n", |
776 | sve_max_vl); | 826 | sve_max_vl); |
777 | pr_info("SVE: default vector length %u bytes per vector\n", | 827 | pr_info("SVE: default vector length %u bytes per vector\n", |
778 | sve_default_vl); | 828 | sve_default_vl); |
779 | 829 | ||
830 | /* KVM decides whether to support mismatched systems. Just warn here: */ | ||
831 | if (sve_max_virtualisable_vl < sve_max_vl) | ||
832 | pr_warn("SVE: unvirtualisable vector lengths present\n"); | ||
833 | |||
780 | sve_efi_setup(); | 834 | sve_efi_setup(); |
781 | } | 835 | } |
782 | 836 | ||
@@ -816,12 +870,11 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) | |||
816 | local_bh_disable(); | 870 | local_bh_disable(); |
817 | 871 | ||
818 | fpsimd_save(); | 872 | fpsimd_save(); |
819 | fpsimd_to_sve(current); | ||
820 | 873 | ||
821 | /* Force ret_to_user to reload the registers: */ | 874 | /* Force ret_to_user to reload the registers: */ |
822 | fpsimd_flush_task_state(current); | 875 | fpsimd_flush_task_state(current); |
823 | set_thread_flag(TIF_FOREIGN_FPSTATE); | ||
824 | 876 | ||
877 | fpsimd_to_sve(current); | ||
825 | if (test_and_set_thread_flag(TIF_SVE)) | 878 | if (test_and_set_thread_flag(TIF_SVE)) |
826 | WARN_ON(1); /* SVE access shouldn't have trapped */ | 879 | WARN_ON(1); /* SVE access shouldn't have trapped */ |
827 | 880 | ||
@@ -894,9 +947,9 @@ void fpsimd_flush_thread(void) | |||
894 | 947 | ||
895 | local_bh_disable(); | 948 | local_bh_disable(); |
896 | 949 | ||
950 | fpsimd_flush_task_state(current); | ||
897 | memset(¤t->thread.uw.fpsimd_state, 0, | 951 | memset(¤t->thread.uw.fpsimd_state, 0, |
898 | sizeof(current->thread.uw.fpsimd_state)); | 952 | sizeof(current->thread.uw.fpsimd_state)); |
899 | fpsimd_flush_task_state(current); | ||
900 | 953 | ||
901 | if (system_supports_sve()) { | 954 | if (system_supports_sve()) { |
902 | clear_thread_flag(TIF_SVE); | 955 | clear_thread_flag(TIF_SVE); |
@@ -933,8 +986,6 @@ void fpsimd_flush_thread(void) | |||
933 | current->thread.sve_vl_onexec = 0; | 986 | current->thread.sve_vl_onexec = 0; |
934 | } | 987 | } |
935 | 988 | ||
936 | set_thread_flag(TIF_FOREIGN_FPSTATE); | ||
937 | |||
938 | local_bh_enable(); | 989 | local_bh_enable(); |
939 | } | 990 | } |
940 | 991 | ||
@@ -974,6 +1025,8 @@ void fpsimd_bind_task_to_cpu(void) | |||
974 | this_cpu_ptr(&fpsimd_last_state); | 1025 | this_cpu_ptr(&fpsimd_last_state); |
975 | 1026 | ||
976 | last->st = ¤t->thread.uw.fpsimd_state; | 1027 | last->st = ¤t->thread.uw.fpsimd_state; |
1028 | last->sve_state = current->thread.sve_state; | ||
1029 | last->sve_vl = current->thread.sve_vl; | ||
977 | current->thread.fpsimd_cpu = smp_processor_id(); | 1030 | current->thread.fpsimd_cpu = smp_processor_id(); |
978 | 1031 | ||
979 | if (system_supports_sve()) { | 1032 | if (system_supports_sve()) { |
@@ -987,7 +1040,8 @@ void fpsimd_bind_task_to_cpu(void) | |||
987 | } | 1040 | } |
988 | } | 1041 | } |
989 | 1042 | ||
990 | void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st) | 1043 | void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, |
1044 | unsigned int sve_vl) | ||
991 | { | 1045 | { |
992 | struct fpsimd_last_state_struct *last = | 1046 | struct fpsimd_last_state_struct *last = |
993 | this_cpu_ptr(&fpsimd_last_state); | 1047 | this_cpu_ptr(&fpsimd_last_state); |
@@ -995,6 +1049,8 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st) | |||
995 | WARN_ON(!in_softirq() && !irqs_disabled()); | 1049 | WARN_ON(!in_softirq() && !irqs_disabled()); |
996 | 1050 | ||
997 | last->st = st; | 1051 | last->st = st; |
1052 | last->sve_state = sve_state; | ||
1053 | last->sve_vl = sve_vl; | ||
998 | } | 1054 | } |
999 | 1055 | ||
1000 | /* | 1056 | /* |
@@ -1043,12 +1099,29 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state) | |||
1043 | 1099 | ||
1044 | /* | 1100 | /* |
1045 | * Invalidate live CPU copies of task t's FPSIMD state | 1101 | * Invalidate live CPU copies of task t's FPSIMD state |
1102 | * | ||
1103 | * This function may be called with preemption enabled. The barrier() | ||
1104 | * ensures that the assignment to fpsimd_cpu is visible to any | ||
1105 | * preemption/softirq that could race with set_tsk_thread_flag(), so | ||
1106 | * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared. | ||
1107 | * | ||
1108 | * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any | ||
1109 | * subsequent code. | ||
1046 | */ | 1110 | */ |
1047 | void fpsimd_flush_task_state(struct task_struct *t) | 1111 | void fpsimd_flush_task_state(struct task_struct *t) |
1048 | { | 1112 | { |
1049 | t->thread.fpsimd_cpu = NR_CPUS; | 1113 | t->thread.fpsimd_cpu = NR_CPUS; |
1114 | |||
1115 | barrier(); | ||
1116 | set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE); | ||
1117 | |||
1118 | barrier(); | ||
1050 | } | 1119 | } |
1051 | 1120 | ||
1121 | /* | ||
1122 | * Invalidate any task's FPSIMD state that is present on this cpu. | ||
1123 | * This function must be called with softirqs disabled. | ||
1124 | */ | ||
1052 | void fpsimd_flush_cpu_state(void) | 1125 | void fpsimd_flush_cpu_state(void) |
1053 | { | 1126 | { |
1054 | __this_cpu_write(fpsimd_last_state.st, NULL); | 1127 | __this_cpu_write(fpsimd_last_state.st, NULL); |
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 4addb38bc250..314b1adedf06 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -26,6 +26,7 @@ | |||
26 | 26 | ||
27 | #include <linux/acpi.h> | 27 | #include <linux/acpi.h> |
28 | #include <linux/clocksource.h> | 28 | #include <linux/clocksource.h> |
29 | #include <linux/kvm_host.h> | ||
29 | #include <linux/of.h> | 30 | #include <linux/of.h> |
30 | #include <linux/perf/arm_pmu.h> | 31 | #include <linux/perf/arm_pmu.h> |
31 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
@@ -528,12 +529,21 @@ static inline int armv8pmu_enable_counter(int idx) | |||
528 | 529 | ||
529 | static inline void armv8pmu_enable_event_counter(struct perf_event *event) | 530 | static inline void armv8pmu_enable_event_counter(struct perf_event *event) |
530 | { | 531 | { |
532 | struct perf_event_attr *attr = &event->attr; | ||
531 | int idx = event->hw.idx; | 533 | int idx = event->hw.idx; |
534 | u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx)); | ||
532 | 535 | ||
533 | armv8pmu_enable_counter(idx); | ||
534 | if (armv8pmu_event_is_chained(event)) | 536 | if (armv8pmu_event_is_chained(event)) |
535 | armv8pmu_enable_counter(idx - 1); | 537 | counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1)); |
536 | isb(); | 538 | |
539 | kvm_set_pmu_events(counter_bits, attr); | ||
540 | |||
541 | /* We rely on the hypervisor switch code to enable guest counters */ | ||
542 | if (!kvm_pmu_counter_deferred(attr)) { | ||
543 | armv8pmu_enable_counter(idx); | ||
544 | if (armv8pmu_event_is_chained(event)) | ||
545 | armv8pmu_enable_counter(idx - 1); | ||
546 | } | ||
537 | } | 547 | } |
538 | 548 | ||
539 | static inline int armv8pmu_disable_counter(int idx) | 549 | static inline int armv8pmu_disable_counter(int idx) |
@@ -546,11 +556,21 @@ static inline int armv8pmu_disable_counter(int idx) | |||
546 | static inline void armv8pmu_disable_event_counter(struct perf_event *event) | 556 | static inline void armv8pmu_disable_event_counter(struct perf_event *event) |
547 | { | 557 | { |
548 | struct hw_perf_event *hwc = &event->hw; | 558 | struct hw_perf_event *hwc = &event->hw; |
559 | struct perf_event_attr *attr = &event->attr; | ||
549 | int idx = hwc->idx; | 560 | int idx = hwc->idx; |
561 | u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx)); | ||
550 | 562 | ||
551 | if (armv8pmu_event_is_chained(event)) | 563 | if (armv8pmu_event_is_chained(event)) |
552 | armv8pmu_disable_counter(idx - 1); | 564 | counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1)); |
553 | armv8pmu_disable_counter(idx); | 565 | |
566 | kvm_clr_pmu_events(counter_bits); | ||
567 | |||
568 | /* We rely on the hypervisor switch code to disable guest counters */ | ||
569 | if (!kvm_pmu_counter_deferred(attr)) { | ||
570 | if (armv8pmu_event_is_chained(event)) | ||
571 | armv8pmu_disable_counter(idx - 1); | ||
572 | armv8pmu_disable_counter(idx); | ||
573 | } | ||
554 | } | 574 | } |
555 | 575 | ||
556 | static inline int armv8pmu_enable_intens(int idx) | 576 | static inline int armv8pmu_enable_intens(int idx) |
@@ -827,14 +847,23 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, | |||
827 | * with other architectures (x86 and Power). | 847 | * with other architectures (x86 and Power). |
828 | */ | 848 | */ |
829 | if (is_kernel_in_hyp_mode()) { | 849 | if (is_kernel_in_hyp_mode()) { |
830 | if (!attr->exclude_kernel) | 850 | if (!attr->exclude_kernel && !attr->exclude_host) |
831 | config_base |= ARMV8_PMU_INCLUDE_EL2; | 851 | config_base |= ARMV8_PMU_INCLUDE_EL2; |
832 | } else { | 852 | if (attr->exclude_guest) |
833 | if (attr->exclude_kernel) | ||
834 | config_base |= ARMV8_PMU_EXCLUDE_EL1; | 853 | config_base |= ARMV8_PMU_EXCLUDE_EL1; |
835 | if (!attr->exclude_hv) | 854 | if (attr->exclude_host) |
855 | config_base |= ARMV8_PMU_EXCLUDE_EL0; | ||
856 | } else { | ||
857 | if (!attr->exclude_hv && !attr->exclude_host) | ||
836 | config_base |= ARMV8_PMU_INCLUDE_EL2; | 858 | config_base |= ARMV8_PMU_INCLUDE_EL2; |
837 | } | 859 | } |
860 | |||
861 | /* | ||
862 | * Filter out !VHE kernels and guest kernels | ||
863 | */ | ||
864 | if (attr->exclude_kernel) | ||
865 | config_base |= ARMV8_PMU_EXCLUDE_EL1; | ||
866 | |||
838 | if (attr->exclude_user) | 867 | if (attr->exclude_user) |
839 | config_base |= ARMV8_PMU_EXCLUDE_EL0; | 868 | config_base |= ARMV8_PMU_EXCLUDE_EL0; |
840 | 869 | ||
@@ -864,6 +893,9 @@ static void armv8pmu_reset(void *info) | |||
864 | armv8pmu_disable_intens(idx); | 893 | armv8pmu_disable_intens(idx); |
865 | } | 894 | } |
866 | 895 | ||
896 | /* Clear the counters we flip at guest entry/exit */ | ||
897 | kvm_clr_pmu_events(U32_MAX); | ||
898 | |||
867 | /* | 899 | /* |
868 | * Initialize & Reset PMNC. Request overflow interrupt for | 900 | * Initialize & Reset PMNC. Request overflow interrupt for |
869 | * 64 bit cycle counter but cheat in armv8pmu_write_counter(). | 901 | * 64 bit cycle counter but cheat in armv8pmu_write_counter(). |
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 867a7cea70e5..a9b0485df074 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c | |||
@@ -296,11 +296,6 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) | |||
296 | */ | 296 | */ |
297 | 297 | ||
298 | fpsimd_flush_task_state(current); | 298 | fpsimd_flush_task_state(current); |
299 | barrier(); | ||
300 | /* From now, fpsimd_thread_switch() won't clear TIF_FOREIGN_FPSTATE */ | ||
301 | |||
302 | set_thread_flag(TIF_FOREIGN_FPSTATE); | ||
303 | barrier(); | ||
304 | /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ | 299 | /* From now, fpsimd_thread_switch() won't touch thread.sve_state */ |
305 | 300 | ||
306 | sve_alloc(current); | 301 | sve_alloc(current); |
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile index 690e033a91c0..3ac1a64d2fb9 100644 --- a/arch/arm64/kvm/Makefile +++ b/arch/arm64/kvm/Makefile | |||
@@ -17,7 +17,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o | |||
17 | kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o | 17 | kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o |
18 | kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o | 18 | kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o |
19 | kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o | 19 | kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o |
20 | kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o | 20 | kvm-$(CONFIG_KVM_ARM_HOST) += vgic-sys-reg-v3.o fpsimd.o pmu.o |
21 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o | 21 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/aarch32.o |
22 | 22 | ||
23 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o | 23 | kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o |
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index aac7808ce216..6e3c9c8b2df9 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
10 | #include <linux/thread_info.h> | 10 | #include <linux/thread_info.h> |
11 | #include <linux/kvm_host.h> | 11 | #include <linux/kvm_host.h> |
12 | #include <asm/fpsimd.h> | ||
12 | #include <asm/kvm_asm.h> | 13 | #include <asm/kvm_asm.h> |
13 | #include <asm/kvm_host.h> | 14 | #include <asm/kvm_host.h> |
14 | #include <asm/kvm_mmu.h> | 15 | #include <asm/kvm_mmu.h> |
@@ -85,9 +86,12 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) | |||
85 | WARN_ON_ONCE(!irqs_disabled()); | 86 | WARN_ON_ONCE(!irqs_disabled()); |
86 | 87 | ||
87 | if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { | 88 | if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { |
88 | fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs); | 89 | fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.gp_regs.fp_regs, |
90 | vcpu->arch.sve_state, | ||
91 | vcpu->arch.sve_max_vl); | ||
92 | |||
89 | clear_thread_flag(TIF_FOREIGN_FPSTATE); | 93 | clear_thread_flag(TIF_FOREIGN_FPSTATE); |
90 | clear_thread_flag(TIF_SVE); | 94 | update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu)); |
91 | } | 95 | } |
92 | } | 96 | } |
93 | 97 | ||
@@ -100,14 +104,21 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) | |||
100 | void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) | 104 | void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) |
101 | { | 105 | { |
102 | unsigned long flags; | 106 | unsigned long flags; |
107 | bool host_has_sve = system_supports_sve(); | ||
108 | bool guest_has_sve = vcpu_has_sve(vcpu); | ||
103 | 109 | ||
104 | local_irq_save(flags); | 110 | local_irq_save(flags); |
105 | 111 | ||
106 | if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { | 112 | if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { |
113 | u64 *guest_zcr = &vcpu->arch.ctxt.sys_regs[ZCR_EL1]; | ||
114 | |||
107 | /* Clean guest FP state to memory and invalidate cpu view */ | 115 | /* Clean guest FP state to memory and invalidate cpu view */ |
108 | fpsimd_save(); | 116 | fpsimd_save(); |
109 | fpsimd_flush_cpu_state(); | 117 | fpsimd_flush_cpu_state(); |
110 | } else if (system_supports_sve()) { | 118 | |
119 | if (guest_has_sve) | ||
120 | *guest_zcr = read_sysreg_s(SYS_ZCR_EL12); | ||
121 | } else if (host_has_sve) { | ||
111 | /* | 122 | /* |
112 | * The FPSIMD/SVE state in the CPU has not been touched, and we | 123 | * The FPSIMD/SVE state in the CPU has not been touched, and we |
113 | * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been | 124 | * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been |
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index dd436a50fce7..3ae2f82fca46 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c | |||
@@ -19,18 +19,25 @@ | |||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/bits.h> | ||
22 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
23 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/nospec.h> | ||
24 | #include <linux/kvm_host.h> | 26 | #include <linux/kvm_host.h> |
25 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/stddef.h> | ||
29 | #include <linux/string.h> | ||
26 | #include <linux/vmalloc.h> | 30 | #include <linux/vmalloc.h> |
27 | #include <linux/fs.h> | 31 | #include <linux/fs.h> |
28 | #include <kvm/arm_psci.h> | 32 | #include <kvm/arm_psci.h> |
29 | #include <asm/cputype.h> | 33 | #include <asm/cputype.h> |
30 | #include <linux/uaccess.h> | 34 | #include <linux/uaccess.h> |
35 | #include <asm/fpsimd.h> | ||
31 | #include <asm/kvm.h> | 36 | #include <asm/kvm.h> |
32 | #include <asm/kvm_emulate.h> | 37 | #include <asm/kvm_emulate.h> |
33 | #include <asm/kvm_coproc.h> | 38 | #include <asm/kvm_coproc.h> |
39 | #include <asm/kvm_host.h> | ||
40 | #include <asm/sigcontext.h> | ||
34 | 41 | ||
35 | #include "trace.h" | 42 | #include "trace.h" |
36 | 43 | ||
@@ -52,12 +59,19 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
52 | return 0; | 59 | return 0; |
53 | } | 60 | } |
54 | 61 | ||
62 | static bool core_reg_offset_is_vreg(u64 off) | ||
63 | { | ||
64 | return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) && | ||
65 | off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr); | ||
66 | } | ||
67 | |||
55 | static u64 core_reg_offset_from_id(u64 id) | 68 | static u64 core_reg_offset_from_id(u64 id) |
56 | { | 69 | { |
57 | return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); | 70 | return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); |
58 | } | 71 | } |
59 | 72 | ||
60 | static int validate_core_offset(const struct kvm_one_reg *reg) | 73 | static int validate_core_offset(const struct kvm_vcpu *vcpu, |
74 | const struct kvm_one_reg *reg) | ||
61 | { | 75 | { |
62 | u64 off = core_reg_offset_from_id(reg->id); | 76 | u64 off = core_reg_offset_from_id(reg->id); |
63 | int size; | 77 | int size; |
@@ -89,11 +103,19 @@ static int validate_core_offset(const struct kvm_one_reg *reg) | |||
89 | return -EINVAL; | 103 | return -EINVAL; |
90 | } | 104 | } |
91 | 105 | ||
92 | if (KVM_REG_SIZE(reg->id) == size && | 106 | if (KVM_REG_SIZE(reg->id) != size || |
93 | IS_ALIGNED(off, size / sizeof(__u32))) | 107 | !IS_ALIGNED(off, size / sizeof(__u32))) |
94 | return 0; | 108 | return -EINVAL; |
95 | 109 | ||
96 | return -EINVAL; | 110 | /* |
111 | * The KVM_REG_ARM64_SVE regs must be used instead of | ||
112 | * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on | ||
113 | * SVE-enabled vcpus: | ||
114 | */ | ||
115 | if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off)) | ||
116 | return -EINVAL; | ||
117 | |||
118 | return 0; | ||
97 | } | 119 | } |
98 | 120 | ||
99 | static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | 121 | static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) |
@@ -115,7 +137,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
115 | (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) | 137 | (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) |
116 | return -ENOENT; | 138 | return -ENOENT; |
117 | 139 | ||
118 | if (validate_core_offset(reg)) | 140 | if (validate_core_offset(vcpu, reg)) |
119 | return -EINVAL; | 141 | return -EINVAL; |
120 | 142 | ||
121 | if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id))) | 143 | if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id))) |
@@ -140,7 +162,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
140 | (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) | 162 | (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs) |
141 | return -ENOENT; | 163 | return -ENOENT; |
142 | 164 | ||
143 | if (validate_core_offset(reg)) | 165 | if (validate_core_offset(vcpu, reg)) |
144 | return -EINVAL; | 166 | return -EINVAL; |
145 | 167 | ||
146 | if (KVM_REG_SIZE(reg->id) > sizeof(tmp)) | 168 | if (KVM_REG_SIZE(reg->id) > sizeof(tmp)) |
@@ -183,6 +205,239 @@ out: | |||
183 | return err; | 205 | return err; |
184 | } | 206 | } |
185 | 207 | ||
208 | #define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64) | ||
209 | #define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64) | ||
210 | |||
211 | static bool vq_present( | ||
212 | const u64 (*const vqs)[KVM_ARM64_SVE_VLS_WORDS], | ||
213 | unsigned int vq) | ||
214 | { | ||
215 | return (*vqs)[vq_word(vq)] & vq_mask(vq); | ||
216 | } | ||
217 | |||
218 | static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
219 | { | ||
220 | unsigned int max_vq, vq; | ||
221 | u64 vqs[KVM_ARM64_SVE_VLS_WORDS]; | ||
222 | |||
223 | if (!vcpu_has_sve(vcpu)) | ||
224 | return -ENOENT; | ||
225 | |||
226 | if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl))) | ||
227 | return -EINVAL; | ||
228 | |||
229 | memset(vqs, 0, sizeof(vqs)); | ||
230 | |||
231 | max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); | ||
232 | for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq) | ||
233 | if (sve_vq_available(vq)) | ||
234 | vqs[vq_word(vq)] |= vq_mask(vq); | ||
235 | |||
236 | if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs))) | ||
237 | return -EFAULT; | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
243 | { | ||
244 | unsigned int max_vq, vq; | ||
245 | u64 vqs[KVM_ARM64_SVE_VLS_WORDS]; | ||
246 | |||
247 | if (!vcpu_has_sve(vcpu)) | ||
248 | return -ENOENT; | ||
249 | |||
250 | if (kvm_arm_vcpu_sve_finalized(vcpu)) | ||
251 | return -EPERM; /* too late! */ | ||
252 | |||
253 | if (WARN_ON(vcpu->arch.sve_state)) | ||
254 | return -EINVAL; | ||
255 | |||
256 | if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs))) | ||
257 | return -EFAULT; | ||
258 | |||
259 | max_vq = 0; | ||
260 | for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq) | ||
261 | if (vq_present(&vqs, vq)) | ||
262 | max_vq = vq; | ||
263 | |||
264 | if (max_vq > sve_vq_from_vl(kvm_sve_max_vl)) | ||
265 | return -EINVAL; | ||
266 | |||
267 | /* | ||
268 | * Vector lengths supported by the host can't currently be | ||
269 | * hidden from the guest individually: instead we can only set a | ||
270 | * maxmium via ZCR_EL2.LEN. So, make sure the available vector | ||
271 | * lengths match the set requested exactly up to the requested | ||
272 | * maximum: | ||
273 | */ | ||
274 | for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq) | ||
275 | if (vq_present(&vqs, vq) != sve_vq_available(vq)) | ||
276 | return -EINVAL; | ||
277 | |||
278 | /* Can't run with no vector lengths at all: */ | ||
279 | if (max_vq < SVE_VQ_MIN) | ||
280 | return -EINVAL; | ||
281 | |||
282 | /* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */ | ||
283 | vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq); | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | #define SVE_REG_SLICE_SHIFT 0 | ||
289 | #define SVE_REG_SLICE_BITS 5 | ||
290 | #define SVE_REG_ID_SHIFT (SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS) | ||
291 | #define SVE_REG_ID_BITS 5 | ||
292 | |||
293 | #define SVE_REG_SLICE_MASK \ | ||
294 | GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1, \ | ||
295 | SVE_REG_SLICE_SHIFT) | ||
296 | #define SVE_REG_ID_MASK \ | ||
297 | GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT) | ||
298 | |||
299 | #define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS) | ||
300 | |||
301 | #define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0)) | ||
302 | #define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0)) | ||
303 | |||
304 | /* | ||
305 | * Number of register slices required to cover each whole SVE register. | ||
306 | * NOTE: Only the first slice every exists, for now. | ||
307 | * If you are tempted to modify this, you must also rework sve_reg_to_region() | ||
308 | * to match: | ||
309 | */ | ||
310 | #define vcpu_sve_slices(vcpu) 1 | ||
311 | |||
312 | /* Bounds of a single SVE register slice within vcpu->arch.sve_state */ | ||
313 | struct sve_state_reg_region { | ||
314 | unsigned int koffset; /* offset into sve_state in kernel memory */ | ||
315 | unsigned int klen; /* length in kernel memory */ | ||
316 | unsigned int upad; /* extra trailing padding in user memory */ | ||
317 | }; | ||
318 | |||
319 | /* | ||
320 | * Validate SVE register ID and get sanitised bounds for user/kernel SVE | ||
321 | * register copy | ||
322 | */ | ||
323 | static int sve_reg_to_region(struct sve_state_reg_region *region, | ||
324 | struct kvm_vcpu *vcpu, | ||
325 | const struct kvm_one_reg *reg) | ||
326 | { | ||
327 | /* reg ID ranges for Z- registers */ | ||
328 | const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0); | ||
329 | const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1, | ||
330 | SVE_NUM_SLICES - 1); | ||
331 | |||
332 | /* reg ID ranges for P- registers and FFR (which are contiguous) */ | ||
333 | const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0); | ||
334 | const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1); | ||
335 | |||
336 | unsigned int vq; | ||
337 | unsigned int reg_num; | ||
338 | |||
339 | unsigned int reqoffset, reqlen; /* User-requested offset and length */ | ||
340 | unsigned int maxlen; /* Maxmimum permitted length */ | ||
341 | |||
342 | size_t sve_state_size; | ||
343 | |||
344 | const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1, | ||
345 | SVE_NUM_SLICES - 1); | ||
346 | |||
347 | /* Verify that the P-regs and FFR really do have contiguous IDs: */ | ||
348 | BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1); | ||
349 | |||
350 | /* Verify that we match the UAPI header: */ | ||
351 | BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES); | ||
352 | |||
353 | reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT; | ||
354 | |||
355 | if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) { | ||
356 | if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0) | ||
357 | return -ENOENT; | ||
358 | |||
359 | vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); | ||
360 | |||
361 | reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) - | ||
362 | SVE_SIG_REGS_OFFSET; | ||
363 | reqlen = KVM_SVE_ZREG_SIZE; | ||
364 | maxlen = SVE_SIG_ZREG_SIZE(vq); | ||
365 | } else if (reg->id >= preg_id_min && reg->id <= preg_id_max) { | ||
366 | if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0) | ||
367 | return -ENOENT; | ||
368 | |||
369 | vq = sve_vq_from_vl(vcpu->arch.sve_max_vl); | ||
370 | |||
371 | reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) - | ||
372 | SVE_SIG_REGS_OFFSET; | ||
373 | reqlen = KVM_SVE_PREG_SIZE; | ||
374 | maxlen = SVE_SIG_PREG_SIZE(vq); | ||
375 | } else { | ||
376 | return -EINVAL; | ||
377 | } | ||
378 | |||
379 | sve_state_size = vcpu_sve_state_size(vcpu); | ||
380 | if (WARN_ON(!sve_state_size)) | ||
381 | return -EINVAL; | ||
382 | |||
383 | region->koffset = array_index_nospec(reqoffset, sve_state_size); | ||
384 | region->klen = min(maxlen, reqlen); | ||
385 | region->upad = reqlen - region->klen; | ||
386 | |||
387 | return 0; | ||
388 | } | ||
389 | |||
390 | static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
391 | { | ||
392 | int ret; | ||
393 | struct sve_state_reg_region region; | ||
394 | char __user *uptr = (char __user *)reg->addr; | ||
395 | |||
396 | /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */ | ||
397 | if (reg->id == KVM_REG_ARM64_SVE_VLS) | ||
398 | return get_sve_vls(vcpu, reg); | ||
399 | |||
400 | /* Try to interpret reg ID as an architectural SVE register... */ | ||
401 | ret = sve_reg_to_region(®ion, vcpu, reg); | ||
402 | if (ret) | ||
403 | return ret; | ||
404 | |||
405 | if (!kvm_arm_vcpu_sve_finalized(vcpu)) | ||
406 | return -EPERM; | ||
407 | |||
408 | if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset, | ||
409 | region.klen) || | ||
410 | clear_user(uptr + region.klen, region.upad)) | ||
411 | return -EFAULT; | ||
412 | |||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | ||
417 | { | ||
418 | int ret; | ||
419 | struct sve_state_reg_region region; | ||
420 | const char __user *uptr = (const char __user *)reg->addr; | ||
421 | |||
422 | /* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */ | ||
423 | if (reg->id == KVM_REG_ARM64_SVE_VLS) | ||
424 | return set_sve_vls(vcpu, reg); | ||
425 | |||
426 | /* Try to interpret reg ID as an architectural SVE register... */ | ||
427 | ret = sve_reg_to_region(®ion, vcpu, reg); | ||
428 | if (ret) | ||
429 | return ret; | ||
430 | |||
431 | if (!kvm_arm_vcpu_sve_finalized(vcpu)) | ||
432 | return -EPERM; | ||
433 | |||
434 | if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr, | ||
435 | region.klen)) | ||
436 | return -EFAULT; | ||
437 | |||
438 | return 0; | ||
439 | } | ||
440 | |||
186 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 441 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
187 | { | 442 | { |
188 | return -EINVAL; | 443 | return -EINVAL; |
@@ -193,9 +448,37 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
193 | return -EINVAL; | 448 | return -EINVAL; |
194 | } | 449 | } |
195 | 450 | ||
196 | static unsigned long num_core_regs(void) | 451 | static int copy_core_reg_indices(const struct kvm_vcpu *vcpu, |
452 | u64 __user *uindices) | ||
453 | { | ||
454 | unsigned int i; | ||
455 | int n = 0; | ||
456 | const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE; | ||
457 | |||
458 | for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) { | ||
459 | /* | ||
460 | * The KVM_REG_ARM64_SVE regs must be used instead of | ||
461 | * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on | ||
462 | * SVE-enabled vcpus: | ||
463 | */ | ||
464 | if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i)) | ||
465 | continue; | ||
466 | |||
467 | if (uindices) { | ||
468 | if (put_user(core_reg | i, uindices)) | ||
469 | return -EFAULT; | ||
470 | uindices++; | ||
471 | } | ||
472 | |||
473 | n++; | ||
474 | } | ||
475 | |||
476 | return n; | ||
477 | } | ||
478 | |||
479 | static unsigned long num_core_regs(const struct kvm_vcpu *vcpu) | ||
197 | { | 480 | { |
198 | return sizeof(struct kvm_regs) / sizeof(__u32); | 481 | return copy_core_reg_indices(vcpu, NULL); |
199 | } | 482 | } |
200 | 483 | ||
201 | /** | 484 | /** |
@@ -251,6 +534,67 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
251 | return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0; | 534 | return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0; |
252 | } | 535 | } |
253 | 536 | ||
537 | static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu) | ||
538 | { | ||
539 | const unsigned int slices = vcpu_sve_slices(vcpu); | ||
540 | |||
541 | if (!vcpu_has_sve(vcpu)) | ||
542 | return 0; | ||
543 | |||
544 | /* Policed by KVM_GET_REG_LIST: */ | ||
545 | WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu)); | ||
546 | |||
547 | return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */) | ||
548 | + 1; /* KVM_REG_ARM64_SVE_VLS */ | ||
549 | } | ||
550 | |||
551 | static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu, | ||
552 | u64 __user *uindices) | ||
553 | { | ||
554 | const unsigned int slices = vcpu_sve_slices(vcpu); | ||
555 | u64 reg; | ||
556 | unsigned int i, n; | ||
557 | int num_regs = 0; | ||
558 | |||
559 | if (!vcpu_has_sve(vcpu)) | ||
560 | return 0; | ||
561 | |||
562 | /* Policed by KVM_GET_REG_LIST: */ | ||
563 | WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu)); | ||
564 | |||
565 | /* | ||
566 | * Enumerate this first, so that userspace can save/restore in | ||
567 | * the order reported by KVM_GET_REG_LIST: | ||
568 | */ | ||
569 | reg = KVM_REG_ARM64_SVE_VLS; | ||
570 | if (put_user(reg, uindices++)) | ||
571 | return -EFAULT; | ||
572 | ++num_regs; | ||
573 | |||
574 | for (i = 0; i < slices; i++) { | ||
575 | for (n = 0; n < SVE_NUM_ZREGS; n++) { | ||
576 | reg = KVM_REG_ARM64_SVE_ZREG(n, i); | ||
577 | if (put_user(reg, uindices++)) | ||
578 | return -EFAULT; | ||
579 | num_regs++; | ||
580 | } | ||
581 | |||
582 | for (n = 0; n < SVE_NUM_PREGS; n++) { | ||
583 | reg = KVM_REG_ARM64_SVE_PREG(n, i); | ||
584 | if (put_user(reg, uindices++)) | ||
585 | return -EFAULT; | ||
586 | num_regs++; | ||
587 | } | ||
588 | |||
589 | reg = KVM_REG_ARM64_SVE_FFR(i); | ||
590 | if (put_user(reg, uindices++)) | ||
591 | return -EFAULT; | ||
592 | num_regs++; | ||
593 | } | ||
594 | |||
595 | return num_regs; | ||
596 | } | ||
597 | |||
254 | /** | 598 | /** |
255 | * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG | 599 | * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG |
256 | * | 600 | * |
@@ -258,8 +602,15 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
258 | */ | 602 | */ |
259 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) | 603 | unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) |
260 | { | 604 | { |
261 | return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu) | 605 | unsigned long res = 0; |
262 | + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS; | 606 | |
607 | res += num_core_regs(vcpu); | ||
608 | res += num_sve_regs(vcpu); | ||
609 | res += kvm_arm_num_sys_reg_descs(vcpu); | ||
610 | res += kvm_arm_get_fw_num_regs(vcpu); | ||
611 | res += NUM_TIMER_REGS; | ||
612 | |||
613 | return res; | ||
263 | } | 614 | } |
264 | 615 | ||
265 | /** | 616 | /** |
@@ -269,23 +620,25 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) | |||
269 | */ | 620 | */ |
270 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) | 621 | int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) |
271 | { | 622 | { |
272 | unsigned int i; | ||
273 | const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE; | ||
274 | int ret; | 623 | int ret; |
275 | 624 | ||
276 | for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) { | 625 | ret = copy_core_reg_indices(vcpu, uindices); |
277 | if (put_user(core_reg | i, uindices)) | 626 | if (ret < 0) |
278 | return -EFAULT; | 627 | return ret; |
279 | uindices++; | 628 | uindices += ret; |
280 | } | 629 | |
630 | ret = copy_sve_reg_indices(vcpu, uindices); | ||
631 | if (ret < 0) | ||
632 | return ret; | ||
633 | uindices += ret; | ||
281 | 634 | ||
282 | ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices); | 635 | ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices); |
283 | if (ret) | 636 | if (ret < 0) |
284 | return ret; | 637 | return ret; |
285 | uindices += kvm_arm_get_fw_num_regs(vcpu); | 638 | uindices += kvm_arm_get_fw_num_regs(vcpu); |
286 | 639 | ||
287 | ret = copy_timer_indices(vcpu, uindices); | 640 | ret = copy_timer_indices(vcpu, uindices); |
288 | if (ret) | 641 | if (ret < 0) |
289 | return ret; | 642 | return ret; |
290 | uindices += NUM_TIMER_REGS; | 643 | uindices += NUM_TIMER_REGS; |
291 | 644 | ||
@@ -298,12 +651,11 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
298 | if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) | 651 | if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) |
299 | return -EINVAL; | 652 | return -EINVAL; |
300 | 653 | ||
301 | /* Register group 16 means we want a core register. */ | 654 | switch (reg->id & KVM_REG_ARM_COPROC_MASK) { |
302 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) | 655 | case KVM_REG_ARM_CORE: return get_core_reg(vcpu, reg); |
303 | return get_core_reg(vcpu, reg); | 656 | case KVM_REG_ARM_FW: return kvm_arm_get_fw_reg(vcpu, reg); |
304 | 657 | case KVM_REG_ARM64_SVE: return get_sve_reg(vcpu, reg); | |
305 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) | 658 | } |
306 | return kvm_arm_get_fw_reg(vcpu, reg); | ||
307 | 659 | ||
308 | if (is_timer_reg(reg->id)) | 660 | if (is_timer_reg(reg->id)) |
309 | return get_timer_reg(vcpu, reg); | 661 | return get_timer_reg(vcpu, reg); |
@@ -317,12 +669,11 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) | |||
317 | if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) | 669 | if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32) |
318 | return -EINVAL; | 670 | return -EINVAL; |
319 | 671 | ||
320 | /* Register group 16 means we set a core register. */ | 672 | switch (reg->id & KVM_REG_ARM_COPROC_MASK) { |
321 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) | 673 | case KVM_REG_ARM_CORE: return set_core_reg(vcpu, reg); |
322 | return set_core_reg(vcpu, reg); | 674 | case KVM_REG_ARM_FW: return kvm_arm_set_fw_reg(vcpu, reg); |
323 | 675 | case KVM_REG_ARM64_SVE: return set_sve_reg(vcpu, reg); | |
324 | if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW) | 676 | } |
325 | return kvm_arm_set_fw_reg(vcpu, reg); | ||
326 | 677 | ||
327 | if (is_timer_reg(reg->id)) | 678 | if (is_timer_reg(reg->id)) |
328 | return set_timer_reg(vcpu, reg); | 679 | return set_timer_reg(vcpu, reg); |
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 0b7983442071..516aead3c2a9 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c | |||
@@ -173,20 +173,40 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
173 | return 1; | 173 | return 1; |
174 | } | 174 | } |
175 | 175 | ||
176 | #define __ptrauth_save_key(regs, key) \ | ||
177 | ({ \ | ||
178 | regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ | ||
179 | regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ | ||
180 | }) | ||
181 | |||
182 | /* | ||
183 | * Handle the guest trying to use a ptrauth instruction, or trying to access a | ||
184 | * ptrauth register. | ||
185 | */ | ||
186 | void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu) | ||
187 | { | ||
188 | struct kvm_cpu_context *ctxt; | ||
189 | |||
190 | if (vcpu_has_ptrauth(vcpu)) { | ||
191 | vcpu_ptrauth_enable(vcpu); | ||
192 | ctxt = vcpu->arch.host_cpu_context; | ||
193 | __ptrauth_save_key(ctxt->sys_regs, APIA); | ||
194 | __ptrauth_save_key(ctxt->sys_regs, APIB); | ||
195 | __ptrauth_save_key(ctxt->sys_regs, APDA); | ||
196 | __ptrauth_save_key(ctxt->sys_regs, APDB); | ||
197 | __ptrauth_save_key(ctxt->sys_regs, APGA); | ||
198 | } else { | ||
199 | kvm_inject_undefined(vcpu); | ||
200 | } | ||
201 | } | ||
202 | |||
176 | /* | 203 | /* |
177 | * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into | 204 | * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into |
178 | * a NOP). | 205 | * a NOP). |
179 | */ | 206 | */ |
180 | static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run) | 207 | static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run) |
181 | { | 208 | { |
182 | /* | 209 | kvm_arm_vcpu_ptrauth_trap(vcpu); |
183 | * We don't currently support ptrauth in a guest, and we mask the ID | ||
184 | * registers to prevent well-behaved guests from trying to make use of | ||
185 | * it. | ||
186 | * | ||
187 | * Inject an UNDEF, as if the feature really isn't present. | ||
188 | */ | ||
189 | kvm_inject_undefined(vcpu); | ||
190 | return 1; | 210 | return 1; |
191 | } | 211 | } |
192 | 212 | ||
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 675fdc186e3b..93ba3d7ef027 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <asm/kvm_arm.h> | 24 | #include <asm/kvm_arm.h> |
25 | #include <asm/kvm_asm.h> | 25 | #include <asm/kvm_asm.h> |
26 | #include <asm/kvm_mmu.h> | 26 | #include <asm/kvm_mmu.h> |
27 | #include <asm/kvm_ptrauth.h> | ||
27 | 28 | ||
28 | #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) | 29 | #define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x) |
29 | #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) | 30 | #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x) |
@@ -64,6 +65,13 @@ ENTRY(__guest_enter) | |||
64 | 65 | ||
65 | add x18, x0, #VCPU_CONTEXT | 66 | add x18, x0, #VCPU_CONTEXT |
66 | 67 | ||
68 | // Macro ptrauth_switch_to_guest format: | ||
69 | // ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3) | ||
70 | // The below macro to restore guest keys is not implemented in C code | ||
71 | // as it may cause Pointer Authentication key signing mismatch errors | ||
72 | // when this feature is enabled for kernel code. | ||
73 | ptrauth_switch_to_guest x18, x0, x1, x2 | ||
74 | |||
67 | // Restore guest regs x0-x17 | 75 | // Restore guest regs x0-x17 |
68 | ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)] | 76 | ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)] |
69 | ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)] | 77 | ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)] |
@@ -118,6 +126,13 @@ ENTRY(__guest_exit) | |||
118 | 126 | ||
119 | get_host_ctxt x2, x3 | 127 | get_host_ctxt x2, x3 |
120 | 128 | ||
129 | // Macro ptrauth_switch_to_guest format: | ||
130 | // ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3) | ||
131 | // The below macro to save/restore keys is not implemented in C code | ||
132 | // as it may cause Pointer Authentication key signing mismatch errors | ||
133 | // when this feature is enabled for kernel code. | ||
134 | ptrauth_switch_to_host x1, x2, x3, x4, x5 | ||
135 | |||
121 | // Now restore the host regs | 136 | // Now restore the host regs |
122 | restore_callee_saved_regs x2 | 137 | restore_callee_saved_regs x2 |
123 | 138 | ||
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 3563fe655cd5..22b4c335e0b2 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c | |||
@@ -100,7 +100,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) | |||
100 | val = read_sysreg(cpacr_el1); | 100 | val = read_sysreg(cpacr_el1); |
101 | val |= CPACR_EL1_TTA; | 101 | val |= CPACR_EL1_TTA; |
102 | val &= ~CPACR_EL1_ZEN; | 102 | val &= ~CPACR_EL1_ZEN; |
103 | if (!update_fp_enabled(vcpu)) { | 103 | if (update_fp_enabled(vcpu)) { |
104 | if (vcpu_has_sve(vcpu)) | ||
105 | val |= CPACR_EL1_ZEN; | ||
106 | } else { | ||
104 | val &= ~CPACR_EL1_FPEN; | 107 | val &= ~CPACR_EL1_FPEN; |
105 | __activate_traps_fpsimd32(vcpu); | 108 | __activate_traps_fpsimd32(vcpu); |
106 | } | 109 | } |
@@ -317,16 +320,48 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu) | |||
317 | return true; | 320 | return true; |
318 | } | 321 | } |
319 | 322 | ||
320 | static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu) | 323 | /* Check for an FPSIMD/SVE trap and handle as appropriate */ |
324 | static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu) | ||
321 | { | 325 | { |
322 | struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state; | 326 | bool vhe, sve_guest, sve_host; |
327 | u8 hsr_ec; | ||
323 | 328 | ||
324 | if (has_vhe()) | 329 | if (!system_supports_fpsimd()) |
325 | write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN, | 330 | return false; |
326 | cpacr_el1); | 331 | |
327 | else | 332 | if (system_supports_sve()) { |
333 | sve_guest = vcpu_has_sve(vcpu); | ||
334 | sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE; | ||
335 | vhe = true; | ||
336 | } else { | ||
337 | sve_guest = false; | ||
338 | sve_host = false; | ||
339 | vhe = has_vhe(); | ||
340 | } | ||
341 | |||
342 | hsr_ec = kvm_vcpu_trap_get_class(vcpu); | ||
343 | if (hsr_ec != ESR_ELx_EC_FP_ASIMD && | ||
344 | hsr_ec != ESR_ELx_EC_SVE) | ||
345 | return false; | ||
346 | |||
347 | /* Don't handle SVE traps for non-SVE vcpus here: */ | ||
348 | if (!sve_guest) | ||
349 | if (hsr_ec != ESR_ELx_EC_FP_ASIMD) | ||
350 | return false; | ||
351 | |||
352 | /* Valid trap. Switch the context: */ | ||
353 | |||
354 | if (vhe) { | ||
355 | u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN; | ||
356 | |||
357 | if (sve_guest) | ||
358 | reg |= CPACR_EL1_ZEN; | ||
359 | |||
360 | write_sysreg(reg, cpacr_el1); | ||
361 | } else { | ||
328 | write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP, | 362 | write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP, |
329 | cptr_el2); | 363 | cptr_el2); |
364 | } | ||
330 | 365 | ||
331 | isb(); | 366 | isb(); |
332 | 367 | ||
@@ -335,21 +370,28 @@ static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu) | |||
335 | * In the SVE case, VHE is assumed: it is enforced by | 370 | * In the SVE case, VHE is assumed: it is enforced by |
336 | * Kconfig and kvm_arch_init(). | 371 | * Kconfig and kvm_arch_init(). |
337 | */ | 372 | */ |
338 | if (system_supports_sve() && | 373 | if (sve_host) { |
339 | (vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE)) { | ||
340 | struct thread_struct *thread = container_of( | 374 | struct thread_struct *thread = container_of( |
341 | host_fpsimd, | 375 | vcpu->arch.host_fpsimd_state, |
342 | struct thread_struct, uw.fpsimd_state); | 376 | struct thread_struct, uw.fpsimd_state); |
343 | 377 | ||
344 | sve_save_state(sve_pffr(thread), &host_fpsimd->fpsr); | 378 | sve_save_state(sve_pffr(thread), |
379 | &vcpu->arch.host_fpsimd_state->fpsr); | ||
345 | } else { | 380 | } else { |
346 | __fpsimd_save_state(host_fpsimd); | 381 | __fpsimd_save_state(vcpu->arch.host_fpsimd_state); |
347 | } | 382 | } |
348 | 383 | ||
349 | vcpu->arch.flags &= ~KVM_ARM64_FP_HOST; | 384 | vcpu->arch.flags &= ~KVM_ARM64_FP_HOST; |
350 | } | 385 | } |
351 | 386 | ||
352 | __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs); | 387 | if (sve_guest) { |
388 | sve_load_state(vcpu_sve_pffr(vcpu), | ||
389 | &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr, | ||
390 | sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1); | ||
391 | write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12); | ||
392 | } else { | ||
393 | __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs); | ||
394 | } | ||
353 | 395 | ||
354 | /* Skip restoring fpexc32 for AArch64 guests */ | 396 | /* Skip restoring fpexc32 for AArch64 guests */ |
355 | if (!(read_sysreg(hcr_el2) & HCR_RW)) | 397 | if (!(read_sysreg(hcr_el2) & HCR_RW)) |
@@ -385,10 +427,10 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) | |||
385 | * and restore the guest context lazily. | 427 | * and restore the guest context lazily. |
386 | * If FP/SIMD is not implemented, handle the trap and inject an | 428 | * If FP/SIMD is not implemented, handle the trap and inject an |
387 | * undefined instruction exception to the guest. | 429 | * undefined instruction exception to the guest. |
430 | * Similarly for trapped SVE accesses. | ||
388 | */ | 431 | */ |
389 | if (system_supports_fpsimd() && | 432 | if (__hyp_handle_fpsimd(vcpu)) |
390 | kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_FP_ASIMD) | 433 | return true; |
391 | return __hyp_switch_fpsimd(vcpu); | ||
392 | 434 | ||
393 | if (!__populate_fault_info(vcpu)) | 435 | if (!__populate_fault_info(vcpu)) |
394 | return true; | 436 | return true; |
@@ -524,6 +566,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) | |||
524 | { | 566 | { |
525 | struct kvm_cpu_context *host_ctxt; | 567 | struct kvm_cpu_context *host_ctxt; |
526 | struct kvm_cpu_context *guest_ctxt; | 568 | struct kvm_cpu_context *guest_ctxt; |
569 | bool pmu_switch_needed; | ||
527 | u64 exit_code; | 570 | u64 exit_code; |
528 | 571 | ||
529 | /* | 572 | /* |
@@ -543,6 +586,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) | |||
543 | host_ctxt->__hyp_running_vcpu = vcpu; | 586 | host_ctxt->__hyp_running_vcpu = vcpu; |
544 | guest_ctxt = &vcpu->arch.ctxt; | 587 | guest_ctxt = &vcpu->arch.ctxt; |
545 | 588 | ||
589 | pmu_switch_needed = __pmu_switch_to_guest(host_ctxt); | ||
590 | |||
546 | __sysreg_save_state_nvhe(host_ctxt); | 591 | __sysreg_save_state_nvhe(host_ctxt); |
547 | 592 | ||
548 | __activate_vm(kern_hyp_va(vcpu->kvm)); | 593 | __activate_vm(kern_hyp_va(vcpu->kvm)); |
@@ -589,6 +634,9 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) | |||
589 | */ | 634 | */ |
590 | __debug_switch_to_host(vcpu); | 635 | __debug_switch_to_host(vcpu); |
591 | 636 | ||
637 | if (pmu_switch_needed) | ||
638 | __pmu_switch_to_host(host_ctxt); | ||
639 | |||
592 | /* Returning to host will clear PSR.I, remask PMR if needed */ | 640 | /* Returning to host will clear PSR.I, remask PMR if needed */ |
593 | if (system_uses_irq_prio_masking()) | 641 | if (system_uses_irq_prio_masking()) |
594 | gic_write_pmr(GIC_PRIO_IRQOFF); | 642 | gic_write_pmr(GIC_PRIO_IRQOFF); |
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c new file mode 100644 index 000000000000..3da94a5bb6b7 --- /dev/null +++ b/arch/arm64/kvm/pmu.c | |||
@@ -0,0 +1,239 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright 2019 Arm Limited | ||
4 | * Author: Andrew Murray <Andrew.Murray@arm.com> | ||
5 | */ | ||
6 | #include <linux/kvm_host.h> | ||
7 | #include <linux/perf_event.h> | ||
8 | #include <asm/kvm_hyp.h> | ||
9 | |||
10 | /* | ||
11 | * Given the perf event attributes and system type, determine | ||
12 | * if we are going to need to switch counters at guest entry/exit. | ||
13 | */ | ||
14 | static bool kvm_pmu_switch_needed(struct perf_event_attr *attr) | ||
15 | { | ||
16 | /** | ||
17 | * With VHE the guest kernel runs at EL1 and the host at EL2, | ||
18 | * where user (EL0) is excluded then we have no reason to switch | ||
19 | * counters. | ||
20 | */ | ||
21 | if (has_vhe() && attr->exclude_user) | ||
22 | return false; | ||
23 | |||
24 | /* Only switch if attributes are different */ | ||
25 | return (attr->exclude_host != attr->exclude_guest); | ||
26 | } | ||
27 | |||
28 | /* | ||
29 | * Add events to track that we may want to switch at guest entry/exit | ||
30 | * time. | ||
31 | */ | ||
32 | void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) | ||
33 | { | ||
34 | struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data); | ||
35 | |||
36 | if (!kvm_pmu_switch_needed(attr)) | ||
37 | return; | ||
38 | |||
39 | if (!attr->exclude_host) | ||
40 | ctx->pmu_events.events_host |= set; | ||
41 | if (!attr->exclude_guest) | ||
42 | ctx->pmu_events.events_guest |= set; | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * Stop tracking events | ||
47 | */ | ||
48 | void kvm_clr_pmu_events(u32 clr) | ||
49 | { | ||
50 | struct kvm_host_data *ctx = this_cpu_ptr(&kvm_host_data); | ||
51 | |||
52 | ctx->pmu_events.events_host &= ~clr; | ||
53 | ctx->pmu_events.events_guest &= ~clr; | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * Disable host events, enable guest events | ||
58 | */ | ||
59 | bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt) | ||
60 | { | ||
61 | struct kvm_host_data *host; | ||
62 | struct kvm_pmu_events *pmu; | ||
63 | |||
64 | host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); | ||
65 | pmu = &host->pmu_events; | ||
66 | |||
67 | if (pmu->events_host) | ||
68 | write_sysreg(pmu->events_host, pmcntenclr_el0); | ||
69 | |||
70 | if (pmu->events_guest) | ||
71 | write_sysreg(pmu->events_guest, pmcntenset_el0); | ||
72 | |||
73 | return (pmu->events_host || pmu->events_guest); | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * Disable guest events, enable host events | ||
78 | */ | ||
79 | void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt) | ||
80 | { | ||
81 | struct kvm_host_data *host; | ||
82 | struct kvm_pmu_events *pmu; | ||
83 | |||
84 | host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); | ||
85 | pmu = &host->pmu_events; | ||
86 | |||
87 | if (pmu->events_guest) | ||
88 | write_sysreg(pmu->events_guest, pmcntenclr_el0); | ||
89 | |||
90 | if (pmu->events_host) | ||
91 | write_sysreg(pmu->events_host, pmcntenset_el0); | ||
92 | } | ||
93 | |||
94 | #define PMEVTYPER_READ_CASE(idx) \ | ||
95 | case idx: \ | ||
96 | return read_sysreg(pmevtyper##idx##_el0) | ||
97 | |||
98 | #define PMEVTYPER_WRITE_CASE(idx) \ | ||
99 | case idx: \ | ||
100 | write_sysreg(val, pmevtyper##idx##_el0); \ | ||
101 | break | ||
102 | |||
103 | #define PMEVTYPER_CASES(readwrite) \ | ||
104 | PMEVTYPER_##readwrite##_CASE(0); \ | ||
105 | PMEVTYPER_##readwrite##_CASE(1); \ | ||
106 | PMEVTYPER_##readwrite##_CASE(2); \ | ||
107 | PMEVTYPER_##readwrite##_CASE(3); \ | ||
108 | PMEVTYPER_##readwrite##_CASE(4); \ | ||
109 | PMEVTYPER_##readwrite##_CASE(5); \ | ||
110 | PMEVTYPER_##readwrite##_CASE(6); \ | ||
111 | PMEVTYPER_##readwrite##_CASE(7); \ | ||
112 | PMEVTYPER_##readwrite##_CASE(8); \ | ||
113 | PMEVTYPER_##readwrite##_CASE(9); \ | ||
114 | PMEVTYPER_##readwrite##_CASE(10); \ | ||
115 | PMEVTYPER_##readwrite##_CASE(11); \ | ||
116 | PMEVTYPER_##readwrite##_CASE(12); \ | ||
117 | PMEVTYPER_##readwrite##_CASE(13); \ | ||
118 | PMEVTYPER_##readwrite##_CASE(14); \ | ||
119 | PMEVTYPER_##readwrite##_CASE(15); \ | ||
120 | PMEVTYPER_##readwrite##_CASE(16); \ | ||
121 | PMEVTYPER_##readwrite##_CASE(17); \ | ||
122 | PMEVTYPER_##readwrite##_CASE(18); \ | ||
123 | PMEVTYPER_##readwrite##_CASE(19); \ | ||
124 | PMEVTYPER_##readwrite##_CASE(20); \ | ||
125 | PMEVTYPER_##readwrite##_CASE(21); \ | ||
126 | PMEVTYPER_##readwrite##_CASE(22); \ | ||
127 | PMEVTYPER_##readwrite##_CASE(23); \ | ||
128 | PMEVTYPER_##readwrite##_CASE(24); \ | ||
129 | PMEVTYPER_##readwrite##_CASE(25); \ | ||
130 | PMEVTYPER_##readwrite##_CASE(26); \ | ||
131 | PMEVTYPER_##readwrite##_CASE(27); \ | ||
132 | PMEVTYPER_##readwrite##_CASE(28); \ | ||
133 | PMEVTYPER_##readwrite##_CASE(29); \ | ||
134 | PMEVTYPER_##readwrite##_CASE(30) | ||
135 | |||
136 | /* | ||
137 | * Read a value direct from PMEVTYPER<idx> where idx is 0-30 | ||
138 | * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31). | ||
139 | */ | ||
140 | static u64 kvm_vcpu_pmu_read_evtype_direct(int idx) | ||
141 | { | ||
142 | switch (idx) { | ||
143 | PMEVTYPER_CASES(READ); | ||
144 | case ARMV8_PMU_CYCLE_IDX: | ||
145 | return read_sysreg(pmccfiltr_el0); | ||
146 | default: | ||
147 | WARN_ON(1); | ||
148 | } | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | /* | ||
154 | * Write a value direct to PMEVTYPER<idx> where idx is 0-30 | ||
155 | * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31). | ||
156 | */ | ||
157 | static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val) | ||
158 | { | ||
159 | switch (idx) { | ||
160 | PMEVTYPER_CASES(WRITE); | ||
161 | case ARMV8_PMU_CYCLE_IDX: | ||
162 | write_sysreg(val, pmccfiltr_el0); | ||
163 | break; | ||
164 | default: | ||
165 | WARN_ON(1); | ||
166 | } | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * Modify ARMv8 PMU events to include EL0 counting | ||
171 | */ | ||
172 | static void kvm_vcpu_pmu_enable_el0(unsigned long events) | ||
173 | { | ||
174 | u64 typer; | ||
175 | u32 counter; | ||
176 | |||
177 | for_each_set_bit(counter, &events, 32) { | ||
178 | typer = kvm_vcpu_pmu_read_evtype_direct(counter); | ||
179 | typer &= ~ARMV8_PMU_EXCLUDE_EL0; | ||
180 | kvm_vcpu_pmu_write_evtype_direct(counter, typer); | ||
181 | } | ||
182 | } | ||
183 | |||
184 | /* | ||
185 | * Modify ARMv8 PMU events to exclude EL0 counting | ||
186 | */ | ||
187 | static void kvm_vcpu_pmu_disable_el0(unsigned long events) | ||
188 | { | ||
189 | u64 typer; | ||
190 | u32 counter; | ||
191 | |||
192 | for_each_set_bit(counter, &events, 32) { | ||
193 | typer = kvm_vcpu_pmu_read_evtype_direct(counter); | ||
194 | typer |= ARMV8_PMU_EXCLUDE_EL0; | ||
195 | kvm_vcpu_pmu_write_evtype_direct(counter, typer); | ||
196 | } | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * On VHE ensure that only guest events have EL0 counting enabled | ||
201 | */ | ||
202 | void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) | ||
203 | { | ||
204 | struct kvm_cpu_context *host_ctxt; | ||
205 | struct kvm_host_data *host; | ||
206 | u32 events_guest, events_host; | ||
207 | |||
208 | if (!has_vhe()) | ||
209 | return; | ||
210 | |||
211 | host_ctxt = vcpu->arch.host_cpu_context; | ||
212 | host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); | ||
213 | events_guest = host->pmu_events.events_guest; | ||
214 | events_host = host->pmu_events.events_host; | ||
215 | |||
216 | kvm_vcpu_pmu_enable_el0(events_guest); | ||
217 | kvm_vcpu_pmu_disable_el0(events_host); | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * On VHE ensure that only host events have EL0 counting enabled | ||
222 | */ | ||
223 | void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) | ||
224 | { | ||
225 | struct kvm_cpu_context *host_ctxt; | ||
226 | struct kvm_host_data *host; | ||
227 | u32 events_guest, events_host; | ||
228 | |||
229 | if (!has_vhe()) | ||
230 | return; | ||
231 | |||
232 | host_ctxt = vcpu->arch.host_cpu_context; | ||
233 | host = container_of(host_ctxt, struct kvm_host_data, host_ctxt); | ||
234 | events_guest = host->pmu_events.events_guest; | ||
235 | events_host = host->pmu_events.events_host; | ||
236 | |||
237 | kvm_vcpu_pmu_enable_el0(events_host); | ||
238 | kvm_vcpu_pmu_disable_el0(events_guest); | ||
239 | } | ||
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index e2a0500cd7a2..1140b4485575 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c | |||
@@ -20,20 +20,26 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/errno.h> | 22 | #include <linux/errno.h> |
23 | #include <linux/kernel.h> | ||
23 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
24 | #include <linux/kvm.h> | 25 | #include <linux/kvm.h> |
25 | #include <linux/hw_breakpoint.h> | 26 | #include <linux/hw_breakpoint.h> |
27 | #include <linux/slab.h> | ||
28 | #include <linux/string.h> | ||
29 | #include <linux/types.h> | ||
26 | 30 | ||
27 | #include <kvm/arm_arch_timer.h> | 31 | #include <kvm/arm_arch_timer.h> |
28 | 32 | ||
29 | #include <asm/cpufeature.h> | 33 | #include <asm/cpufeature.h> |
30 | #include <asm/cputype.h> | 34 | #include <asm/cputype.h> |
35 | #include <asm/fpsimd.h> | ||
31 | #include <asm/ptrace.h> | 36 | #include <asm/ptrace.h> |
32 | #include <asm/kvm_arm.h> | 37 | #include <asm/kvm_arm.h> |
33 | #include <asm/kvm_asm.h> | 38 | #include <asm/kvm_asm.h> |
34 | #include <asm/kvm_coproc.h> | 39 | #include <asm/kvm_coproc.h> |
35 | #include <asm/kvm_emulate.h> | 40 | #include <asm/kvm_emulate.h> |
36 | #include <asm/kvm_mmu.h> | 41 | #include <asm/kvm_mmu.h> |
42 | #include <asm/virt.h> | ||
37 | 43 | ||
38 | /* Maximum phys_shift supported for any VM on this host */ | 44 | /* Maximum phys_shift supported for any VM on this host */ |
39 | static u32 kvm_ipa_limit; | 45 | static u32 kvm_ipa_limit; |
@@ -92,6 +98,14 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
92 | case KVM_CAP_ARM_VM_IPA_SIZE: | 98 | case KVM_CAP_ARM_VM_IPA_SIZE: |
93 | r = kvm_ipa_limit; | 99 | r = kvm_ipa_limit; |
94 | break; | 100 | break; |
101 | case KVM_CAP_ARM_SVE: | ||
102 | r = system_supports_sve(); | ||
103 | break; | ||
104 | case KVM_CAP_ARM_PTRAUTH_ADDRESS: | ||
105 | case KVM_CAP_ARM_PTRAUTH_GENERIC: | ||
106 | r = has_vhe() && system_supports_address_auth() && | ||
107 | system_supports_generic_auth(); | ||
108 | break; | ||
95 | default: | 109 | default: |
96 | r = 0; | 110 | r = 0; |
97 | } | 111 | } |
@@ -99,13 +113,148 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
99 | return r; | 113 | return r; |
100 | } | 114 | } |
101 | 115 | ||
116 | unsigned int kvm_sve_max_vl; | ||
117 | |||
118 | int kvm_arm_init_sve(void) | ||
119 | { | ||
120 | if (system_supports_sve()) { | ||
121 | kvm_sve_max_vl = sve_max_virtualisable_vl; | ||
122 | |||
123 | /* | ||
124 | * The get_sve_reg()/set_sve_reg() ioctl interface will need | ||
125 | * to be extended with multiple register slice support in | ||
126 | * order to support vector lengths greater than | ||
127 | * SVE_VL_ARCH_MAX: | ||
128 | */ | ||
129 | if (WARN_ON(kvm_sve_max_vl > SVE_VL_ARCH_MAX)) | ||
130 | kvm_sve_max_vl = SVE_VL_ARCH_MAX; | ||
131 | |||
132 | /* | ||
133 | * Don't even try to make use of vector lengths that | ||
134 | * aren't available on all CPUs, for now: | ||
135 | */ | ||
136 | if (kvm_sve_max_vl < sve_max_vl) | ||
137 | pr_warn("KVM: SVE vector length for guests limited to %u bytes\n", | ||
138 | kvm_sve_max_vl); | ||
139 | } | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu) | ||
145 | { | ||
146 | if (!system_supports_sve()) | ||
147 | return -EINVAL; | ||
148 | |||
149 | /* Verify that KVM startup enforced this when SVE was detected: */ | ||
150 | if (WARN_ON(!has_vhe())) | ||
151 | return -EINVAL; | ||
152 | |||
153 | vcpu->arch.sve_max_vl = kvm_sve_max_vl; | ||
154 | |||
155 | /* | ||
156 | * Userspace can still customize the vector lengths by writing | ||
157 | * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until | ||
158 | * kvm_arm_vcpu_finalize(), which freezes the configuration. | ||
159 | */ | ||
160 | vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE; | ||
161 | |||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * Finalize vcpu's maximum SVE vector length, allocating | ||
167 | * vcpu->arch.sve_state as necessary. | ||
168 | */ | ||
169 | static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) | ||
170 | { | ||
171 | void *buf; | ||
172 | unsigned int vl; | ||
173 | |||
174 | vl = vcpu->arch.sve_max_vl; | ||
175 | |||
176 | /* | ||
176 | * Responsibility for these properties is shared between | ||
178 | * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and | ||
179 | * set_sve_vls(). Double-check here just to be sure: | ||
180 | */ | ||
181 | if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl || | ||
182 | vl > SVE_VL_ARCH_MAX)) | ||
183 | return -EIO; | ||
184 | |||
185 | buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL); | ||
186 | if (!buf) | ||
187 | return -ENOMEM; | ||
188 | |||
189 | vcpu->arch.sve_state = buf; | ||
190 | vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED; | ||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) | ||
195 | { | ||
196 | switch (feature) { | ||
197 | case KVM_ARM_VCPU_SVE: | ||
198 | if (!vcpu_has_sve(vcpu)) | ||
199 | return -EINVAL; | ||
200 | |||
201 | if (kvm_arm_vcpu_sve_finalized(vcpu)) | ||
202 | return -EPERM; | ||
203 | |||
204 | return kvm_vcpu_finalize_sve(vcpu); | ||
205 | } | ||
206 | |||
207 | return -EINVAL; | ||
208 | } | ||
209 | |||
210 | bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) | ||
211 | { | ||
212 | if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu)) | ||
213 | return false; | ||
214 | |||
215 | return true; | ||
216 | } | ||
217 | |||
218 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | ||
219 | { | ||
220 | kfree(vcpu->arch.sve_state); | ||
221 | } | ||
222 | |||
223 | static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) | ||
224 | { | ||
225 | if (vcpu_has_sve(vcpu)) | ||
226 | memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu)); | ||
227 | } | ||
228 | |||
229 | static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu) | ||
230 | { | ||
231 | /* Support ptrauth only if the system supports these capabilities. */ | ||
232 | if (!has_vhe()) | ||
233 | return -EINVAL; | ||
234 | |||
235 | if (!system_supports_address_auth() || | ||
236 | !system_supports_generic_auth()) | ||
237 | return -EINVAL; | ||
238 | /* | ||
239 | * For now make sure that both address/generic pointer authentication | ||
240 | * features are requested by the userspace together. | ||
241 | */ | ||
242 | if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || | ||
243 | !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) | ||
244 | return -EINVAL; | ||
245 | |||
246 | vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH; | ||
247 | return 0; | ||
248 | } | ||
249 | |||
102 | /** | 250 | /** |
103 | * kvm_reset_vcpu - sets core registers and sys_regs to reset value | 251 | * kvm_reset_vcpu - sets core registers and sys_regs to reset value |
104 | * @vcpu: The VCPU pointer | 252 | * @vcpu: The VCPU pointer |
105 | * | 253 | * |
106 | * This function finds the right table above and sets the registers on | 254 | * This function finds the right table above and sets the registers on |
107 | * the virtual CPU struct to their architecturally defined reset | 255 | * the virtual CPU struct to their architecturally defined reset |
108 | * values. | 256 | * values, except for registers whose reset is deferred until |
257 | * kvm_arm_vcpu_finalize(). | ||
109 | * | 258 | * |
110 | * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT | 259 | * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT |
111 | * ioctl or as part of handling a request issued by another VCPU in the PSCI | 260 | * ioctl or as part of handling a request issued by another VCPU in the PSCI |
@@ -131,6 +280,22 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
131 | if (loaded) | 280 | if (loaded) |
132 | kvm_arch_vcpu_put(vcpu); | 281 | kvm_arch_vcpu_put(vcpu); |
133 | 282 | ||
283 | if (!kvm_arm_vcpu_sve_finalized(vcpu)) { | ||
284 | if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) { | ||
285 | ret = kvm_vcpu_enable_sve(vcpu); | ||
286 | if (ret) | ||
287 | goto out; | ||
288 | } | ||
289 | } else { | ||
290 | kvm_vcpu_reset_sve(vcpu); | ||
291 | } | ||
292 | |||
293 | if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || | ||
294 | test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) { | ||
295 | if (kvm_vcpu_enable_ptrauth(vcpu)) | ||
296 | goto out; | ||
297 | } | ||
298 | |||
134 | switch (vcpu->arch.target) { | 299 | switch (vcpu->arch.target) { |
135 | default: | 300 | default: |
136 | if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { | 301 | if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 539feecda5b8..857b226bcdde 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -695,6 +695,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, | |||
695 | val |= p->regval & ARMV8_PMU_PMCR_MASK; | 695 | val |= p->regval & ARMV8_PMU_PMCR_MASK; |
696 | __vcpu_sys_reg(vcpu, PMCR_EL0) = val; | 696 | __vcpu_sys_reg(vcpu, PMCR_EL0) = val; |
697 | kvm_pmu_handle_pmcr(vcpu, val); | 697 | kvm_pmu_handle_pmcr(vcpu, val); |
698 | kvm_vcpu_pmu_restore_guest(vcpu); | ||
698 | } else { | 699 | } else { |
699 | /* PMCR.P & PMCR.C are RAZ */ | 700 | /* PMCR.P & PMCR.C are RAZ */ |
700 | val = __vcpu_sys_reg(vcpu, PMCR_EL0) | 701 | val = __vcpu_sys_reg(vcpu, PMCR_EL0) |
@@ -850,6 +851,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, | |||
850 | if (p->is_write) { | 851 | if (p->is_write) { |
851 | kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); | 852 | kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); |
852 | __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK; | 853 | __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK; |
854 | kvm_vcpu_pmu_restore_guest(vcpu); | ||
853 | } else { | 855 | } else { |
854 | p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; | 856 | p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; |
855 | } | 857 | } |
@@ -875,6 +877,7 @@ static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, | |||
875 | /* accessing PMCNTENSET_EL0 */ | 877 | /* accessing PMCNTENSET_EL0 */ |
876 | __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; | 878 | __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val; |
877 | kvm_pmu_enable_counter(vcpu, val); | 879 | kvm_pmu_enable_counter(vcpu, val); |
880 | kvm_vcpu_pmu_restore_guest(vcpu); | ||
878 | } else { | 881 | } else { |
879 | /* accessing PMCNTENCLR_EL0 */ | 882 | /* accessing PMCNTENCLR_EL0 */ |
880 | __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; | 883 | __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val; |
@@ -1007,6 +1010,37 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, | |||
1007 | { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \ | 1010 | { SYS_DESC(SYS_PMEVTYPERn_EL0(n)), \ |
1008 | access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } | 1011 | access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), } |
1009 | 1012 | ||
1013 | static bool trap_ptrauth(struct kvm_vcpu *vcpu, | ||
1014 | struct sys_reg_params *p, | ||
1015 | const struct sys_reg_desc *rd) | ||
1016 | { | ||
1017 | kvm_arm_vcpu_ptrauth_trap(vcpu); | ||
1018 | |||
1019 | /* | ||
1020 | * Return false for both cases as we never skip the trapped | ||
1021 | * instruction: | ||
1022 | * | ||
1023 | * - Either we re-execute the same key register access instruction | ||
1024 | * after enabling ptrauth. | ||
1025 | * - Or an UNDEF is injected as ptrauth is not supported/enabled. | ||
1026 | */ | ||
1027 | return false; | ||
1028 | } | ||
1029 | |||
1030 | static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu, | ||
1031 | const struct sys_reg_desc *rd) | ||
1032 | { | ||
1033 | return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN_USER | REG_HIDDEN_GUEST; | ||
1034 | } | ||
1035 | |||
1036 | #define __PTRAUTH_KEY(k) \ | ||
1037 | { SYS_DESC(SYS_## k), trap_ptrauth, reset_unknown, k, \ | ||
1038 | .visibility = ptrauth_visibility} | ||
1039 | |||
1040 | #define PTRAUTH_KEY(k) \ | ||
1041 | __PTRAUTH_KEY(k ## KEYLO_EL1), \ | ||
1042 | __PTRAUTH_KEY(k ## KEYHI_EL1) | ||
1043 | |||
1010 | static bool access_arch_timer(struct kvm_vcpu *vcpu, | 1044 | static bool access_arch_timer(struct kvm_vcpu *vcpu, |
1011 | struct sys_reg_params *p, | 1045 | struct sys_reg_params *p, |
1012 | const struct sys_reg_desc *r) | 1046 | const struct sys_reg_desc *r) |
@@ -1044,25 +1078,20 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu, | |||
1044 | } | 1078 | } |
1045 | 1079 | ||
1046 | /* Read a sanitised cpufeature ID register by sys_reg_desc */ | 1080 | /* Read a sanitised cpufeature ID register by sys_reg_desc */ |
1047 | static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) | 1081 | static u64 read_id_reg(const struct kvm_vcpu *vcpu, |
1082 | struct sys_reg_desc const *r, bool raz) | ||
1048 | { | 1083 | { |
1049 | u32 id = sys_reg((u32)r->Op0, (u32)r->Op1, | 1084 | u32 id = sys_reg((u32)r->Op0, (u32)r->Op1, |
1050 | (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); | 1085 | (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); |
1051 | u64 val = raz ? 0 : read_sanitised_ftr_reg(id); | 1086 | u64 val = raz ? 0 : read_sanitised_ftr_reg(id); |
1052 | 1087 | ||
1053 | if (id == SYS_ID_AA64PFR0_EL1) { | 1088 | if (id == SYS_ID_AA64PFR0_EL1 && !vcpu_has_sve(vcpu)) { |
1054 | if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT)) | ||
1055 | kvm_debug("SVE unsupported for guests, suppressing\n"); | ||
1056 | |||
1057 | val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); | 1089 | val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT); |
1058 | } else if (id == SYS_ID_AA64ISAR1_EL1) { | 1090 | } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) { |
1059 | const u64 ptrauth_mask = (0xfUL << ID_AA64ISAR1_APA_SHIFT) | | 1091 | val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) | |
1060 | (0xfUL << ID_AA64ISAR1_API_SHIFT) | | 1092 | (0xfUL << ID_AA64ISAR1_API_SHIFT) | |
1061 | (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | | 1093 | (0xfUL << ID_AA64ISAR1_GPA_SHIFT) | |
1062 | (0xfUL << ID_AA64ISAR1_GPI_SHIFT); | 1094 | (0xfUL << ID_AA64ISAR1_GPI_SHIFT)); |
1063 | if (val & ptrauth_mask) | ||
1064 | kvm_debug("ptrauth unsupported for guests, suppressing\n"); | ||
1065 | val &= ~ptrauth_mask; | ||
1066 | } | 1095 | } |
1067 | 1096 | ||
1068 | return val; | 1097 | return val; |
@@ -1078,7 +1107,7 @@ static bool __access_id_reg(struct kvm_vcpu *vcpu, | |||
1078 | if (p->is_write) | 1107 | if (p->is_write) |
1079 | return write_to_read_only(vcpu, p, r); | 1108 | return write_to_read_only(vcpu, p, r); |
1080 | 1109 | ||
1081 | p->regval = read_id_reg(r, raz); | 1110 | p->regval = read_id_reg(vcpu, r, raz); |
1082 | return true; | 1111 | return true; |
1083 | } | 1112 | } |
1084 | 1113 | ||
@@ -1100,6 +1129,81 @@ static int reg_from_user(u64 *val, const void __user *uaddr, u64 id); | |||
1100 | static int reg_to_user(void __user *uaddr, const u64 *val, u64 id); | 1129 | static int reg_to_user(void __user *uaddr, const u64 *val, u64 id); |
1101 | static u64 sys_reg_to_index(const struct sys_reg_desc *reg); | 1130 | static u64 sys_reg_to_index(const struct sys_reg_desc *reg); |
1102 | 1131 | ||
1132 | /* Visibility overrides for SVE-specific control registers */ | ||
1133 | static unsigned int sve_visibility(const struct kvm_vcpu *vcpu, | ||
1134 | const struct sys_reg_desc *rd) | ||
1135 | { | ||
1136 | if (vcpu_has_sve(vcpu)) | ||
1137 | return 0; | ||
1138 | |||
1139 | return REG_HIDDEN_USER | REG_HIDDEN_GUEST; | ||
1140 | } | ||
1141 | |||
1142 | /* Visibility overrides for SVE-specific ID registers */ | ||
1143 | static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu, | ||
1144 | const struct sys_reg_desc *rd) | ||
1145 | { | ||
1146 | if (vcpu_has_sve(vcpu)) | ||
1147 | return 0; | ||
1148 | |||
1149 | return REG_HIDDEN_USER; | ||
1150 | } | ||
1151 | |||
1152 | /* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */ | ||
1153 | static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu) | ||
1154 | { | ||
1155 | if (!vcpu_has_sve(vcpu)) | ||
1156 | return 0; | ||
1157 | |||
1158 | return read_sanitised_ftr_reg(SYS_ID_AA64ZFR0_EL1); | ||
1159 | } | ||
1160 | |||
1161 | static bool access_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, | ||
1162 | struct sys_reg_params *p, | ||
1163 | const struct sys_reg_desc *rd) | ||
1164 | { | ||
1165 | if (p->is_write) | ||
1166 | return write_to_read_only(vcpu, p, rd); | ||
1167 | |||
1168 | p->regval = guest_id_aa64zfr0_el1(vcpu); | ||
1169 | return true; | ||
1170 | } | ||
1171 | |||
1172 | static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, | ||
1173 | const struct sys_reg_desc *rd, | ||
1174 | const struct kvm_one_reg *reg, void __user *uaddr) | ||
1175 | { | ||
1176 | u64 val; | ||
1177 | |||
1178 | if (WARN_ON(!vcpu_has_sve(vcpu))) | ||
1179 | return -ENOENT; | ||
1180 | |||
1181 | val = guest_id_aa64zfr0_el1(vcpu); | ||
1182 | return reg_to_user(uaddr, &val, reg->id); | ||
1183 | } | ||
1184 | |||
1185 | static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu, | ||
1186 | const struct sys_reg_desc *rd, | ||
1187 | const struct kvm_one_reg *reg, void __user *uaddr) | ||
1188 | { | ||
1189 | const u64 id = sys_reg_to_index(rd); | ||
1190 | int err; | ||
1191 | u64 val; | ||
1192 | |||
1193 | if (WARN_ON(!vcpu_has_sve(vcpu))) | ||
1194 | return -ENOENT; | ||
1195 | |||
1196 | err = reg_from_user(&val, uaddr, id); | ||
1197 | if (err) | ||
1198 | return err; | ||
1199 | |||
1200 | /* This is what we mean by invariant: you can't change it. */ | ||
1201 | if (val != guest_id_aa64zfr0_el1(vcpu)) | ||
1202 | return -EINVAL; | ||
1203 | |||
1204 | return 0; | ||
1205 | } | ||
1206 | |||
1103 | /* | 1207 | /* |
1104 | * cpufeature ID register user accessors | 1208 | * cpufeature ID register user accessors |
1105 | * | 1209 | * |
@@ -1107,16 +1211,18 @@ static u64 sys_reg_to_index(const struct sys_reg_desc *reg); | |||
1107 | * are stored, and for set_id_reg() we don't allow the effective value | 1211 | * are stored, and for set_id_reg() we don't allow the effective value |
1108 | * to be changed. | 1212 | * to be changed. |
1109 | */ | 1213 | */ |
1110 | static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, | 1214 | static int __get_id_reg(const struct kvm_vcpu *vcpu, |
1215 | const struct sys_reg_desc *rd, void __user *uaddr, | ||
1111 | bool raz) | 1216 | bool raz) |
1112 | { | 1217 | { |
1113 | const u64 id = sys_reg_to_index(rd); | 1218 | const u64 id = sys_reg_to_index(rd); |
1114 | const u64 val = read_id_reg(rd, raz); | 1219 | const u64 val = read_id_reg(vcpu, rd, raz); |
1115 | 1220 | ||
1116 | return reg_to_user(uaddr, &val, id); | 1221 | return reg_to_user(uaddr, &val, id); |
1117 | } | 1222 | } |
1118 | 1223 | ||
1119 | static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, | 1224 | static int __set_id_reg(const struct kvm_vcpu *vcpu, |
1225 | const struct sys_reg_desc *rd, void __user *uaddr, | ||
1120 | bool raz) | 1226 | bool raz) |
1121 | { | 1227 | { |
1122 | const u64 id = sys_reg_to_index(rd); | 1228 | const u64 id = sys_reg_to_index(rd); |
@@ -1128,7 +1234,7 @@ static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, | |||
1128 | return err; | 1234 | return err; |
1129 | 1235 | ||
1130 | /* This is what we mean by invariant: you can't change it. */ | 1236 | /* This is what we mean by invariant: you can't change it. */ |
1131 | if (val != read_id_reg(rd, raz)) | 1237 | if (val != read_id_reg(vcpu, rd, raz)) |
1132 | return -EINVAL; | 1238 | return -EINVAL; |
1133 | 1239 | ||
1134 | return 0; | 1240 | return 0; |
@@ -1137,25 +1243,25 @@ static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr, | |||
1137 | static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, | 1243 | static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
1138 | const struct kvm_one_reg *reg, void __user *uaddr) | 1244 | const struct kvm_one_reg *reg, void __user *uaddr) |
1139 | { | 1245 | { |
1140 | return __get_id_reg(rd, uaddr, false); | 1246 | return __get_id_reg(vcpu, rd, uaddr, false); |
1141 | } | 1247 | } |
1142 | 1248 | ||
1143 | static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, | 1249 | static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
1144 | const struct kvm_one_reg *reg, void __user *uaddr) | 1250 | const struct kvm_one_reg *reg, void __user *uaddr) |
1145 | { | 1251 | { |
1146 | return __set_id_reg(rd, uaddr, false); | 1252 | return __set_id_reg(vcpu, rd, uaddr, false); |
1147 | } | 1253 | } |
1148 | 1254 | ||
1149 | static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, | 1255 | static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
1150 | const struct kvm_one_reg *reg, void __user *uaddr) | 1256 | const struct kvm_one_reg *reg, void __user *uaddr) |
1151 | { | 1257 | { |
1152 | return __get_id_reg(rd, uaddr, true); | 1258 | return __get_id_reg(vcpu, rd, uaddr, true); |
1153 | } | 1259 | } |
1154 | 1260 | ||
1155 | static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, | 1261 | static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
1156 | const struct kvm_one_reg *reg, void __user *uaddr) | 1262 | const struct kvm_one_reg *reg, void __user *uaddr) |
1157 | { | 1263 | { |
1158 | return __set_id_reg(rd, uaddr, true); | 1264 | return __set_id_reg(vcpu, rd, uaddr, true); |
1159 | } | 1265 | } |
1160 | 1266 | ||
1161 | static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, | 1267 | static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
@@ -1343,7 +1449,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
1343 | ID_SANITISED(ID_AA64PFR1_EL1), | 1449 | ID_SANITISED(ID_AA64PFR1_EL1), |
1344 | ID_UNALLOCATED(4,2), | 1450 | ID_UNALLOCATED(4,2), |
1345 | ID_UNALLOCATED(4,3), | 1451 | ID_UNALLOCATED(4,3), |
1346 | ID_UNALLOCATED(4,4), | 1452 | { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility }, |
1347 | ID_UNALLOCATED(4,5), | 1453 | ID_UNALLOCATED(4,5), |
1348 | ID_UNALLOCATED(4,6), | 1454 | ID_UNALLOCATED(4,6), |
1349 | ID_UNALLOCATED(4,7), | 1455 | ID_UNALLOCATED(4,7), |
@@ -1380,10 +1486,17 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
1380 | 1486 | ||
1381 | { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, | 1487 | { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, |
1382 | { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, | 1488 | { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 }, |
1489 | { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility }, | ||
1383 | { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, | 1490 | { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 }, |
1384 | { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, | 1491 | { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 }, |
1385 | { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 }, | 1492 | { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 }, |
1386 | 1493 | ||
1494 | PTRAUTH_KEY(APIA), | ||
1495 | PTRAUTH_KEY(APIB), | ||
1496 | PTRAUTH_KEY(APDA), | ||
1497 | PTRAUTH_KEY(APDB), | ||
1498 | PTRAUTH_KEY(APGA), | ||
1499 | |||
1387 | { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 }, | 1500 | { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 }, |
1388 | { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 }, | 1501 | { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 }, |
1389 | { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 }, | 1502 | { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 }, |
@@ -1924,6 +2037,12 @@ static void perform_access(struct kvm_vcpu *vcpu, | |||
1924 | { | 2037 | { |
1925 | trace_kvm_sys_access(*vcpu_pc(vcpu), params, r); | 2038 | trace_kvm_sys_access(*vcpu_pc(vcpu), params, r); |
1926 | 2039 | ||
2040 | /* Check for regs disabled by runtime config */ | ||
2041 | if (sysreg_hidden_from_guest(vcpu, r)) { | ||
2042 | kvm_inject_undefined(vcpu); | ||
2043 | return; | ||
2044 | } | ||
2045 | |||
1927 | /* | 2046 | /* |
1928 | * Not having an accessor means that we have configured a trap | 2047 | * Not having an accessor means that we have configured a trap |
1929 | * that we don't know how to handle. This certainly qualifies | 2048 | * that we don't know how to handle. This certainly qualifies |
@@ -2435,6 +2554,10 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg | |||
2435 | if (!r) | 2554 | if (!r) |
2436 | return get_invariant_sys_reg(reg->id, uaddr); | 2555 | return get_invariant_sys_reg(reg->id, uaddr); |
2437 | 2556 | ||
2557 | /* Check for regs disabled by runtime config */ | ||
2558 | if (sysreg_hidden_from_user(vcpu, r)) | ||
2559 | return -ENOENT; | ||
2560 | |||
2438 | if (r->get_user) | 2561 | if (r->get_user) |
2439 | return (r->get_user)(vcpu, r, reg, uaddr); | 2562 | return (r->get_user)(vcpu, r, reg, uaddr); |
2440 | 2563 | ||
@@ -2456,6 +2579,10 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg | |||
2456 | if (!r) | 2579 | if (!r) |
2457 | return set_invariant_sys_reg(reg->id, uaddr); | 2580 | return set_invariant_sys_reg(reg->id, uaddr); |
2458 | 2581 | ||
2582 | /* Check for regs disabled by runtime config */ | ||
2583 | if (sysreg_hidden_from_user(vcpu, r)) | ||
2584 | return -ENOENT; | ||
2585 | |||
2459 | if (r->set_user) | 2586 | if (r->set_user) |
2460 | return (r->set_user)(vcpu, r, reg, uaddr); | 2587 | return (r->set_user)(vcpu, r, reg, uaddr); |
2461 | 2588 | ||
@@ -2512,7 +2639,8 @@ static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) | |||
2512 | return true; | 2639 | return true; |
2513 | } | 2640 | } |
2514 | 2641 | ||
2515 | static int walk_one_sys_reg(const struct sys_reg_desc *rd, | 2642 | static int walk_one_sys_reg(const struct kvm_vcpu *vcpu, |
2643 | const struct sys_reg_desc *rd, | ||
2516 | u64 __user **uind, | 2644 | u64 __user **uind, |
2517 | unsigned int *total) | 2645 | unsigned int *total) |
2518 | { | 2646 | { |
@@ -2523,6 +2651,9 @@ static int walk_one_sys_reg(const struct sys_reg_desc *rd, | |||
2523 | if (!(rd->reg || rd->get_user)) | 2651 | if (!(rd->reg || rd->get_user)) |
2524 | return 0; | 2652 | return 0; |
2525 | 2653 | ||
2654 | if (sysreg_hidden_from_user(vcpu, rd)) | ||
2655 | return 0; | ||
2656 | |||
2526 | if (!copy_reg_to_user(rd, uind)) | 2657 | if (!copy_reg_to_user(rd, uind)) |
2527 | return -EFAULT; | 2658 | return -EFAULT; |
2528 | 2659 | ||
@@ -2551,9 +2682,9 @@ static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) | |||
2551 | int cmp = cmp_sys_reg(i1, i2); | 2682 | int cmp = cmp_sys_reg(i1, i2); |
2552 | /* target-specific overrides generic entry. */ | 2683 | /* target-specific overrides generic entry. */ |
2553 | if (cmp <= 0) | 2684 | if (cmp <= 0) |
2554 | err = walk_one_sys_reg(i1, &uind, &total); | 2685 | err = walk_one_sys_reg(vcpu, i1, &uind, &total); |
2555 | else | 2686 | else |
2556 | err = walk_one_sys_reg(i2, &uind, &total); | 2687 | err = walk_one_sys_reg(vcpu, i2, &uind, &total); |
2557 | 2688 | ||
2558 | if (err) | 2689 | if (err) |
2559 | return err; | 2690 | return err; |
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index 3b1bc7f01d0b..2be99508dcb9 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h | |||
@@ -64,8 +64,15 @@ struct sys_reg_desc { | |||
64 | const struct kvm_one_reg *reg, void __user *uaddr); | 64 | const struct kvm_one_reg *reg, void __user *uaddr); |
65 | int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, | 65 | int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, |
66 | const struct kvm_one_reg *reg, void __user *uaddr); | 66 | const struct kvm_one_reg *reg, void __user *uaddr); |
67 | |||
68 | /* Return mask of REG_* runtime visibility overrides */ | ||
69 | unsigned int (*visibility)(const struct kvm_vcpu *vcpu, | ||
70 | const struct sys_reg_desc *rd); | ||
67 | }; | 71 | }; |
68 | 72 | ||
73 | #define REG_HIDDEN_USER (1 << 0) /* hidden from userspace ioctls */ | ||
74 | #define REG_HIDDEN_GUEST (1 << 1) /* hidden from guest */ | ||
75 | |||
69 | static inline void print_sys_reg_instr(const struct sys_reg_params *p) | 76 | static inline void print_sys_reg_instr(const struct sys_reg_params *p) |
70 | { | 77 | { |
71 | /* Look, we even formatted it for you to paste into the table! */ | 78 | /* Look, we even formatted it for you to paste into the table! */ |
@@ -102,6 +109,24 @@ static inline void reset_val(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r | |||
102 | __vcpu_sys_reg(vcpu, r->reg) = r->val; | 109 | __vcpu_sys_reg(vcpu, r->reg) = r->val; |
103 | } | 110 | } |
104 | 111 | ||
112 | static inline bool sysreg_hidden_from_guest(const struct kvm_vcpu *vcpu, | ||
113 | const struct sys_reg_desc *r) | ||
114 | { | ||
115 | if (likely(!r->visibility)) | ||
116 | return false; | ||
117 | |||
118 | return r->visibility(vcpu, r) & REG_HIDDEN_GUEST; | ||
119 | } | ||
120 | |||
121 | static inline bool sysreg_hidden_from_user(const struct kvm_vcpu *vcpu, | ||
122 | const struct sys_reg_desc *r) | ||
123 | { | ||
124 | if (likely(!r->visibility)) | ||
125 | return false; | ||
126 | |||
127 | return r->visibility(vcpu, r) & REG_HIDDEN_USER; | ||
128 | } | ||
129 | |||
105 | static inline int cmp_sys_reg(const struct sys_reg_desc *i1, | 130 | static inline int cmp_sys_reg(const struct sys_reg_desc *i1, |
106 | const struct sys_reg_desc *i2) | 131 | const struct sys_reg_desc *i2) |
107 | { | 132 | { |
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index d727adf07801..2fe12b40d503 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h | |||
@@ -990,6 +990,9 @@ struct kvm_ppc_resize_hpt { | |||
990 | #define KVM_CAP_HYPERV_CPUID 167 | 990 | #define KVM_CAP_HYPERV_CPUID 167 |
991 | #define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 168 | 991 | #define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 168 |
992 | #define KVM_CAP_PPC_IRQ_XIVE 169 | 992 | #define KVM_CAP_PPC_IRQ_XIVE 169 |
993 | #define KVM_CAP_ARM_SVE 170 | ||
994 | #define KVM_CAP_ARM_PTRAUTH_ADDRESS 171 | ||
995 | #define KVM_CAP_ARM_PTRAUTH_GENERIC 172 | ||
993 | 996 | ||
994 | #ifdef KVM_CAP_IRQ_ROUTING | 997 | #ifdef KVM_CAP_IRQ_ROUTING |
995 | 998 | ||
@@ -1147,6 +1150,7 @@ struct kvm_dirty_tlb { | |||
1147 | #define KVM_REG_SIZE_U256 0x0050000000000000ULL | 1150 | #define KVM_REG_SIZE_U256 0x0050000000000000ULL |
1148 | #define KVM_REG_SIZE_U512 0x0060000000000000ULL | 1151 | #define KVM_REG_SIZE_U512 0x0060000000000000ULL |
1149 | #define KVM_REG_SIZE_U1024 0x0070000000000000ULL | 1152 | #define KVM_REG_SIZE_U1024 0x0070000000000000ULL |
1153 | #define KVM_REG_SIZE_U2048 0x0080000000000000ULL | ||
1150 | 1154 | ||
1151 | struct kvm_reg_list { | 1155 | struct kvm_reg_list { |
1152 | __u64 n; /* number of regs */ | 1156 | __u64 n; /* number of regs */ |
@@ -1444,6 +1448,9 @@ struct kvm_enc_region { | |||
1444 | /* Available with KVM_CAP_HYPERV_CPUID */ | 1448 | /* Available with KVM_CAP_HYPERV_CPUID */ |
1445 | #define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2) | 1449 | #define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2) |
1446 | 1450 | ||
1451 | /* Available with KVM_CAP_ARM_SVE */ | ||
1452 | #define KVM_ARM_VCPU_FINALIZE _IOW(KVMIO, 0xc2, int) | ||
1453 | |||
1447 | /* Secure Encrypted Virtualization command */ | 1454 | /* Secure Encrypted Virtualization command */ |
1448 | enum sev_cmd_id { | 1455 | enum sev_cmd_id { |
1449 | /* Guest initialization commands */ | 1456 | /* Guest initialization commands */ |
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index be4ec5f3ba5f..e5312f47d8e1 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c | |||
@@ -56,7 +56,7 @@ | |||
56 | __asm__(".arch_extension virt"); | 56 | __asm__(".arch_extension virt"); |
57 | #endif | 57 | #endif |
58 | 58 | ||
59 | DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state); | 59 | DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data); |
60 | static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); | 60 | static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); |
61 | 61 | ||
62 | /* Per-CPU variable containing the currently running vcpu. */ | 62 | /* Per-CPU variable containing the currently running vcpu. */ |
@@ -357,8 +357,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
357 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 357 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
358 | { | 358 | { |
359 | int *last_ran; | 359 | int *last_ran; |
360 | kvm_host_data_t *cpu_data; | ||
360 | 361 | ||
361 | last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran); | 362 | last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran); |
363 | cpu_data = this_cpu_ptr(&kvm_host_data); | ||
362 | 364 | ||
363 | /* | 365 | /* |
364 | * We might get preempted before the vCPU actually runs, but | 366 | * We might get preempted before the vCPU actually runs, but |
@@ -370,18 +372,21 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
370 | } | 372 | } |
371 | 373 | ||
372 | vcpu->cpu = cpu; | 374 | vcpu->cpu = cpu; |
373 | vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state); | 375 | vcpu->arch.host_cpu_context = &cpu_data->host_ctxt; |
374 | 376 | ||
375 | kvm_arm_set_running_vcpu(vcpu); | 377 | kvm_arm_set_running_vcpu(vcpu); |
376 | kvm_vgic_load(vcpu); | 378 | kvm_vgic_load(vcpu); |
377 | kvm_timer_vcpu_load(vcpu); | 379 | kvm_timer_vcpu_load(vcpu); |
378 | kvm_vcpu_load_sysregs(vcpu); | 380 | kvm_vcpu_load_sysregs(vcpu); |
379 | kvm_arch_vcpu_load_fp(vcpu); | 381 | kvm_arch_vcpu_load_fp(vcpu); |
382 | kvm_vcpu_pmu_restore_guest(vcpu); | ||
380 | 383 | ||
381 | if (single_task_running()) | 384 | if (single_task_running()) |
382 | vcpu_clear_wfe_traps(vcpu); | 385 | vcpu_clear_wfe_traps(vcpu); |
383 | else | 386 | else |
384 | vcpu_set_wfe_traps(vcpu); | 387 | vcpu_set_wfe_traps(vcpu); |
388 | |||
389 | vcpu_ptrauth_setup_lazy(vcpu); | ||
385 | } | 390 | } |
386 | 391 | ||
387 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | 392 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
@@ -390,6 +395,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |||
390 | kvm_vcpu_put_sysregs(vcpu); | 395 | kvm_vcpu_put_sysregs(vcpu); |
391 | kvm_timer_vcpu_put(vcpu); | 396 | kvm_timer_vcpu_put(vcpu); |
392 | kvm_vgic_put(vcpu); | 397 | kvm_vgic_put(vcpu); |
398 | kvm_vcpu_pmu_restore_host(vcpu); | ||
393 | 399 | ||
394 | vcpu->cpu = -1; | 400 | vcpu->cpu = -1; |
395 | 401 | ||
@@ -542,6 +548,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) | |||
542 | if (likely(vcpu->arch.has_run_once)) | 548 | if (likely(vcpu->arch.has_run_once)) |
543 | return 0; | 549 | return 0; |
544 | 550 | ||
551 | if (!kvm_arm_vcpu_is_finalized(vcpu)) | ||
552 | return -EPERM; | ||
553 | |||
545 | vcpu->arch.has_run_once = true; | 554 | vcpu->arch.has_run_once = true; |
546 | 555 | ||
547 | if (likely(irqchip_in_kernel(kvm))) { | 556 | if (likely(irqchip_in_kernel(kvm))) { |
@@ -1113,6 +1122,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
1113 | if (unlikely(!kvm_vcpu_initialized(vcpu))) | 1122 | if (unlikely(!kvm_vcpu_initialized(vcpu))) |
1114 | break; | 1123 | break; |
1115 | 1124 | ||
1125 | r = -EPERM; | ||
1126 | if (!kvm_arm_vcpu_is_finalized(vcpu)) | ||
1127 | break; | ||
1128 | |||
1116 | r = -EFAULT; | 1129 | r = -EFAULT; |
1117 | if (copy_from_user(®_list, user_list, sizeof(reg_list))) | 1130 | if (copy_from_user(®_list, user_list, sizeof(reg_list))) |
1118 | break; | 1131 | break; |
@@ -1166,6 +1179,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
1166 | 1179 | ||
1167 | return kvm_arm_vcpu_set_events(vcpu, &events); | 1180 | return kvm_arm_vcpu_set_events(vcpu, &events); |
1168 | } | 1181 | } |
1182 | case KVM_ARM_VCPU_FINALIZE: { | ||
1183 | int what; | ||
1184 | |||
1185 | if (!kvm_vcpu_initialized(vcpu)) | ||
1186 | return -ENOEXEC; | ||
1187 | |||
1188 | if (get_user(what, (const int __user *)argp)) | ||
1189 | return -EFAULT; | ||
1190 | |||
1191 | return kvm_arm_vcpu_finalize(vcpu, what); | ||
1192 | } | ||
1169 | default: | 1193 | default: |
1170 | r = -EINVAL; | 1194 | r = -EINVAL; |
1171 | } | 1195 | } |
@@ -1546,11 +1570,11 @@ static int init_hyp_mode(void) | |||
1546 | } | 1570 | } |
1547 | 1571 | ||
1548 | for_each_possible_cpu(cpu) { | 1572 | for_each_possible_cpu(cpu) { |
1549 | kvm_cpu_context_t *cpu_ctxt; | 1573 | kvm_host_data_t *cpu_data; |
1550 | 1574 | ||
1551 | cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu); | 1575 | cpu_data = per_cpu_ptr(&kvm_host_data, cpu); |
1552 | kvm_init_host_cpu_context(cpu_ctxt, cpu); | 1576 | kvm_init_host_cpu_context(&cpu_data->host_ctxt, cpu); |
1553 | err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP); | 1577 | err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP); |
1554 | 1578 | ||
1555 | if (err) { | 1579 | if (err) { |
1556 | kvm_err("Cannot map host CPU state: %d\n", err); | 1580 | kvm_err("Cannot map host CPU state: %d\n", err); |
@@ -1661,6 +1685,10 @@ int kvm_arch_init(void *opaque) | |||
1661 | if (err) | 1685 | if (err) |
1662 | return err; | 1686 | return err; |
1663 | 1687 | ||
1688 | err = kvm_arm_init_sve(); | ||
1689 | if (err) | ||
1690 | return err; | ||
1691 | |||
1664 | if (!in_hyp_mode) { | 1692 | if (!in_hyp_mode) { |
1665 | err = init_hyp_mode(); | 1693 | err = init_hyp_mode(); |
1666 | if (err) | 1694 | if (err) |