summaryrefslogtreecommitdiffstats
path: root/virt
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-02-08 13:44:25 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2018-02-08 13:44:25 -0500
commitc0136321924dd338bb8fc5661c4b0e27441a8d04 (patch)
tree099cc60dfe6793309b21474a00e4d10087a7cae8 /virt
parent846ade7dd2e630a309a8c57302046e8c4037b8df (diff)
parent3a0a397ff5ff8b56ca9f7908b75dee6bf0b5fabb (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull more arm64 updates from Catalin Marinas: "As I mentioned in the last pull request, there's a second batch of security updates for arm64 with mitigations for Spectre/v1 and an improved one for Spectre/v2 (via a newly defined firmware interface API). Spectre v1 mitigation: - back-end version of array_index_mask_nospec() - masking of the syscall number to restrict speculation through the syscall table - masking of __user pointers prior to dereference in uaccess routines Spectre v2 mitigation update: - using the new firmware SMC calling convention specification update - removing the current PSCI GET_VERSION firmware call mitigation as vendors are deploying new SMCCC-capable firmware - additional branch predictor hardening for synchronous exceptions and interrupts while in user mode Meltdown v3 mitigation update: - Cavium Thunder X is unaffected but a hardware erratum gets in the way. The kernel now starts with the page tables mapped as global and switches to non-global if kpti needs to be enabled. 
Other: - Theoretical trylock bug fixed" * tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (38 commits) arm64: Kill PSCI_GET_VERSION as a variant-2 workaround arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support arm/arm64: smccc: Implement SMCCC v1.1 inline primitive arm/arm64: smccc: Make function identifiers an unsigned quantity firmware/psci: Expose SMCCC version through psci_ops firmware/psci: Expose PSCI conduit arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support arm/arm64: KVM: Turn kvm_psci_version into a static inline arm/arm64: KVM: Advertise SMCCC v1.1 arm/arm64: KVM: Implement PSCI 1.0 support arm/arm64: KVM: Add smccc accessors to PSCI code arm/arm64: KVM: Add PSCI_VERSION helper arm/arm64: KVM: Consolidate the PSCI include files arm64: KVM: Increment PC after handling an SMC trap arm: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls arm64: entry: Apply BP hardening for suspicious interrupts from EL0 arm64: entry: Apply BP hardening for high-priority synchronous exceptions arm64: futex: Mask __user pointers prior to dereference ...
Diffstat (limited to 'virt')
-rw-r--r--virt/kvm/arm/arm.c2
-rw-r--r--virt/kvm/arm/psci.c143
2 files changed, 120 insertions, 25 deletions
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 08464b2fba1d..7e3941f2ecde 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -31,6 +31,7 @@
31#include <linux/irqbypass.h> 31#include <linux/irqbypass.h>
32#include <trace/events/kvm.h> 32#include <trace/events/kvm.h>
33#include <kvm/arm_pmu.h> 33#include <kvm/arm_pmu.h>
34#include <kvm/arm_psci.h>
34 35
35#define CREATE_TRACE_POINTS 36#define CREATE_TRACE_POINTS
36#include "trace.h" 37#include "trace.h"
@@ -46,7 +47,6 @@
46#include <asm/kvm_mmu.h> 47#include <asm/kvm_mmu.h>
47#include <asm/kvm_emulate.h> 48#include <asm/kvm_emulate.h>
48#include <asm/kvm_coproc.h> 49#include <asm/kvm_coproc.h>
49#include <asm/kvm_psci.h>
50#include <asm/sections.h> 50#include <asm/sections.h>
51 51
52#ifdef REQUIRES_VIRT 52#ifdef REQUIRES_VIRT
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
index f1e363bab5e8..6919352cbf15 100644
--- a/virt/kvm/arm/psci.c
+++ b/virt/kvm/arm/psci.c
@@ -15,16 +15,16 @@
15 * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/arm-smccc.h>
18#include <linux/preempt.h> 19#include <linux/preempt.h>
19#include <linux/kvm_host.h> 20#include <linux/kvm_host.h>
20#include <linux/wait.h> 21#include <linux/wait.h>
21 22
22#include <asm/cputype.h> 23#include <asm/cputype.h>
23#include <asm/kvm_emulate.h> 24#include <asm/kvm_emulate.h>
24#include <asm/kvm_psci.h>
25#include <asm/kvm_host.h> 25#include <asm/kvm_host.h>
26 26
27#include <uapi/linux/psci.h> 27#include <kvm/arm_psci.h>
28 28
29/* 29/*
30 * This is an implementation of the Power State Coordination Interface 30 * This is an implementation of the Power State Coordination Interface
@@ -33,6 +33,38 @@
33 33
34#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1) 34#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
35 35
36static u32 smccc_get_function(struct kvm_vcpu *vcpu)
37{
38 return vcpu_get_reg(vcpu, 0);
39}
40
41static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
42{
43 return vcpu_get_reg(vcpu, 1);
44}
45
46static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
47{
48 return vcpu_get_reg(vcpu, 2);
49}
50
51static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
52{
53 return vcpu_get_reg(vcpu, 3);
54}
55
56static void smccc_set_retval(struct kvm_vcpu *vcpu,
57 unsigned long a0,
58 unsigned long a1,
59 unsigned long a2,
60 unsigned long a3)
61{
62 vcpu_set_reg(vcpu, 0, a0);
63 vcpu_set_reg(vcpu, 1, a1);
64 vcpu_set_reg(vcpu, 2, a2);
65 vcpu_set_reg(vcpu, 3, a3);
66}
67
36static unsigned long psci_affinity_mask(unsigned long affinity_level) 68static unsigned long psci_affinity_mask(unsigned long affinity_level)
37{ 69{
38 if (affinity_level <= 3) 70 if (affinity_level <= 3)
@@ -78,7 +110,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
78 unsigned long context_id; 110 unsigned long context_id;
79 phys_addr_t target_pc; 111 phys_addr_t target_pc;
80 112
81 cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK; 113 cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
82 if (vcpu_mode_is_32bit(source_vcpu)) 114 if (vcpu_mode_is_32bit(source_vcpu))
83 cpu_id &= ~((u32) 0); 115 cpu_id &= ~((u32) 0);
84 116
@@ -91,14 +123,14 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
91 if (!vcpu) 123 if (!vcpu)
92 return PSCI_RET_INVALID_PARAMS; 124 return PSCI_RET_INVALID_PARAMS;
93 if (!vcpu->arch.power_off) { 125 if (!vcpu->arch.power_off) {
94 if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) 126 if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
95 return PSCI_RET_ALREADY_ON; 127 return PSCI_RET_ALREADY_ON;
96 else 128 else
97 return PSCI_RET_INVALID_PARAMS; 129 return PSCI_RET_INVALID_PARAMS;
98 } 130 }
99 131
100 target_pc = vcpu_get_reg(source_vcpu, 2); 132 target_pc = smccc_get_arg2(source_vcpu);
101 context_id = vcpu_get_reg(source_vcpu, 3); 133 context_id = smccc_get_arg3(source_vcpu);
102 134
103 kvm_reset_vcpu(vcpu); 135 kvm_reset_vcpu(vcpu);
104 136
@@ -117,7 +149,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
117 * NOTE: We always update r0 (or x0) because for PSCI v0.1 149 * NOTE: We always update r0 (or x0) because for PSCI v0.1
118 * the general puspose registers are undefined upon CPU_ON. 150 * the general puspose registers are undefined upon CPU_ON.
119 */ 151 */
120 vcpu_set_reg(vcpu, 0, context_id); 152 smccc_set_retval(vcpu, context_id, 0, 0, 0);
121 vcpu->arch.power_off = false; 153 vcpu->arch.power_off = false;
122 smp_mb(); /* Make sure the above is visible */ 154 smp_mb(); /* Make sure the above is visible */
123 155
@@ -137,8 +169,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
137 struct kvm *kvm = vcpu->kvm; 169 struct kvm *kvm = vcpu->kvm;
138 struct kvm_vcpu *tmp; 170 struct kvm_vcpu *tmp;
139 171
140 target_affinity = vcpu_get_reg(vcpu, 1); 172 target_affinity = smccc_get_arg1(vcpu);
141 lowest_affinity_level = vcpu_get_reg(vcpu, 2); 173 lowest_affinity_level = smccc_get_arg2(vcpu);
142 174
143 /* Determine target affinity mask */ 175 /* Determine target affinity mask */
144 target_affinity_mask = psci_affinity_mask(lowest_affinity_level); 176 target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -200,18 +232,10 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
200 kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET); 232 kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
201} 233}
202 234
203int kvm_psci_version(struct kvm_vcpu *vcpu)
204{
205 if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
206 return KVM_ARM_PSCI_0_2;
207
208 return KVM_ARM_PSCI_0_1;
209}
210
211static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) 235static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
212{ 236{
213 struct kvm *kvm = vcpu->kvm; 237 struct kvm *kvm = vcpu->kvm;
214 unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); 238 u32 psci_fn = smccc_get_function(vcpu);
215 unsigned long val; 239 unsigned long val;
216 int ret = 1; 240 int ret = 1;
217 241
@@ -221,7 +245,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
221 * Bits[31:16] = Major Version = 0 245 * Bits[31:16] = Major Version = 0
222 * Bits[15:0] = Minor Version = 2 246 * Bits[15:0] = Minor Version = 2
223 */ 247 */
224 val = 2; 248 val = KVM_ARM_PSCI_0_2;
225 break; 249 break;
226 case PSCI_0_2_FN_CPU_SUSPEND: 250 case PSCI_0_2_FN_CPU_SUSPEND:
227 case PSCI_0_2_FN64_CPU_SUSPEND: 251 case PSCI_0_2_FN64_CPU_SUSPEND:
@@ -278,14 +302,56 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
278 break; 302 break;
279 } 303 }
280 304
281 vcpu_set_reg(vcpu, 0, val); 305 smccc_set_retval(vcpu, val, 0, 0, 0);
306 return ret;
307}
308
309static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
310{
311 u32 psci_fn = smccc_get_function(vcpu);
312 u32 feature;
313 unsigned long val;
314 int ret = 1;
315
316 switch(psci_fn) {
317 case PSCI_0_2_FN_PSCI_VERSION:
318 val = KVM_ARM_PSCI_1_0;
319 break;
320 case PSCI_1_0_FN_PSCI_FEATURES:
321 feature = smccc_get_arg1(vcpu);
322 switch(feature) {
323 case PSCI_0_2_FN_PSCI_VERSION:
324 case PSCI_0_2_FN_CPU_SUSPEND:
325 case PSCI_0_2_FN64_CPU_SUSPEND:
326 case PSCI_0_2_FN_CPU_OFF:
327 case PSCI_0_2_FN_CPU_ON:
328 case PSCI_0_2_FN64_CPU_ON:
329 case PSCI_0_2_FN_AFFINITY_INFO:
330 case PSCI_0_2_FN64_AFFINITY_INFO:
331 case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
332 case PSCI_0_2_FN_SYSTEM_OFF:
333 case PSCI_0_2_FN_SYSTEM_RESET:
334 case PSCI_1_0_FN_PSCI_FEATURES:
335 case ARM_SMCCC_VERSION_FUNC_ID:
336 val = 0;
337 break;
338 default:
339 val = PSCI_RET_NOT_SUPPORTED;
340 break;
341 }
342 break;
343 default:
344 return kvm_psci_0_2_call(vcpu);
345 }
346
347 smccc_set_retval(vcpu, val, 0, 0, 0);
282 return ret; 348 return ret;
283} 349}
284 350
285static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) 351static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
286{ 352{
287 struct kvm *kvm = vcpu->kvm; 353 struct kvm *kvm = vcpu->kvm;
288 unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); 354 u32 psci_fn = smccc_get_function(vcpu);
289 unsigned long val; 355 unsigned long val;
290 356
291 switch (psci_fn) { 357 switch (psci_fn) {
@@ -303,7 +369,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
303 break; 369 break;
304 } 370 }
305 371
306 vcpu_set_reg(vcpu, 0, val); 372 smccc_set_retval(vcpu, val, 0, 0, 0);
307 return 1; 373 return 1;
308} 374}
309 375
@@ -321,9 +387,11 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
321 * Errors: 387 * Errors:
322 * -EINVAL: Unrecognized PSCI function 388 * -EINVAL: Unrecognized PSCI function
323 */ 389 */
324int kvm_psci_call(struct kvm_vcpu *vcpu) 390static int kvm_psci_call(struct kvm_vcpu *vcpu)
325{ 391{
326 switch (kvm_psci_version(vcpu)) { 392 switch (kvm_psci_version(vcpu, vcpu->kvm)) {
393 case KVM_ARM_PSCI_1_0:
394 return kvm_psci_1_0_call(vcpu);
327 case KVM_ARM_PSCI_0_2: 395 case KVM_ARM_PSCI_0_2:
328 return kvm_psci_0_2_call(vcpu); 396 return kvm_psci_0_2_call(vcpu);
329 case KVM_ARM_PSCI_0_1: 397 case KVM_ARM_PSCI_0_1:
@@ -332,3 +400,30 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
332 return -EINVAL; 400 return -EINVAL;
333 }; 401 };
334} 402}
403
404int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
405{
406 u32 func_id = smccc_get_function(vcpu);
407 u32 val = PSCI_RET_NOT_SUPPORTED;
408 u32 feature;
409
410 switch (func_id) {
411 case ARM_SMCCC_VERSION_FUNC_ID:
412 val = ARM_SMCCC_VERSION_1_1;
413 break;
414 case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
415 feature = smccc_get_arg1(vcpu);
416 switch(feature) {
417 case ARM_SMCCC_ARCH_WORKAROUND_1:
418 if (kvm_arm_harden_branch_predictor())
419 val = 0;
420 break;
421 }
422 break;
423 default:
424 return kvm_psci_call(vcpu);
425 }
426
427 smccc_set_retval(vcpu, val, 0, 0, 0);
428 return 1;
429}