author     Linus Torvalds <torvalds@linux-foundation.org>    2018-02-08 13:44:25 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2018-02-08 13:44:25 -0500
commit     c0136321924dd338bb8fc5661c4b0e27441a8d04 (patch)
tree       099cc60dfe6793309b21474a00e4d10087a7cae8
parent     846ade7dd2e630a309a8c57302046e8c4037b8df (diff)
parent     3a0a397ff5ff8b56ca9f7908b75dee6bf0b5fabb (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull more arm64 updates from Catalin Marinas:
"As I mentioned in the last pull request, there's a second batch of
security updates for arm64 with mitigations for Spectre/v1 and an
improved one for Spectre/v2 (via a newly defined firmware interface
API).
Spectre v1 mitigation:
- back-end version of array_index_mask_nospec()
- masking of the syscall number to restrict speculation through the
syscall table
- masking of __user pointers prior to dereference in uaccess routines
Spectre v2 mitigation update:
- using the new firmware SMC calling convention specification update
- removing the current PSCI GET_VERSION firmware call mitigation as
vendors are deploying new SMCCC-capable firmware
- additional branch predictor hardening for synchronous exceptions
and interrupts while in user mode
Meltdown v3 mitigation update:
- Cavium ThunderX is unaffected but a hardware erratum gets in the
way. The kernel now starts with the page tables mapped as global
and switches to non-global if kpti needs to be enabled.
Other:
- Theoretical trylock bug fixed"
* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (38 commits)
arm64: Kill PSCI_GET_VERSION as a variant-2 workaround
arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support
arm/arm64: smccc: Implement SMCCC v1.1 inline primitive
arm/arm64: smccc: Make function identifiers an unsigned quantity
firmware/psci: Expose SMCCC version through psci_ops
firmware/psci: Expose PSCI conduit
arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling
arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support
arm/arm64: KVM: Turn kvm_psci_version into a static inline
arm/arm64: KVM: Advertise SMCCC v1.1
arm/arm64: KVM: Implement PSCI 1.0 support
arm/arm64: KVM: Add smccc accessors to PSCI code
arm/arm64: KVM: Add PSCI_VERSION helper
arm/arm64: KVM: Consolidate the PSCI include files
arm64: KVM: Increment PC after handling an SMC trap
arm: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls
arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls
arm64: entry: Apply BP hardening for suspicious interrupts from EL0
arm64: entry: Apply BP hardening for high-priority synchronous exceptions
arm64: futex: Mask __user pointers prior to dereference
...
38 files changed, 1032 insertions, 292 deletions
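[Editor's note] The Spectre-v1 half of this pull adds the arm64 back-end of array_index_mask_nospec(), which the generic array_index_nospec() helper in <linux/nospec.h> uses to clamp a bounded index under speculation. A minimal, illustrative caller is sketched below; example_table and NR_ENTRIES are made-up names, and only the masking pattern comes from this series.

    #include <linux/nospec.h>
    #include <linux/types.h>

    #define NR_ENTRIES 16                        /* hypothetical bound */
    static u8 example_table[NR_ENTRIES];         /* hypothetical array */

    static u8 example_read(unsigned long idx)
    {
            if (idx >= NR_ENTRIES)
                    return 0;
            /*
             * array_index_nospec() expands to
             * idx & array_index_mask_nospec(idx, NR_ENTRIES): the mask is
             * ~0UL when idx < NR_ENTRIES and 0 otherwise, so a mispredicted
             * bounds check can at worst speculatively load element 0.
             */
            idx = array_index_nospec(idx, NR_ENTRIES);
            return example_table[idx];
    }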
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index acbf9ec7b396..ef54013b5b9f 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -306,4 +306,11 @@ static inline void kvm_fpsimd_flush_cpu_state(void) {} | |||
306 | 306 | ||
307 | static inline void kvm_arm_vhe_guest_enter(void) {} | 307 | static inline void kvm_arm_vhe_guest_enter(void) {} |
308 | static inline void kvm_arm_vhe_guest_exit(void) {} | 308 | static inline void kvm_arm_vhe_guest_exit(void) {} |
309 | |||
310 | static inline bool kvm_arm_harden_branch_predictor(void) | ||
311 | { | ||
312 | /* No way to detect it yet, pretend it is not there. */ | ||
313 | return false; | ||
314 | } | ||
315 | |||
309 | #endif /* __ARM_KVM_HOST_H__ */ | 316 | #endif /* __ARM_KVM_HOST_H__ */ |
diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h deleted file mode 100644 index 6bda945d31fa..000000000000 --- a/arch/arm/include/asm/kvm_psci.h +++ /dev/null | |||
@@ -1,27 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM_KVM_PSCI_H__ | ||
19 | #define __ARM_KVM_PSCI_H__ | ||
20 | |||
21 | #define KVM_ARM_PSCI_0_1 1 | ||
22 | #define KVM_ARM_PSCI_0_2 2 | ||
23 | |||
24 | int kvm_psci_version(struct kvm_vcpu *vcpu); | ||
25 | int kvm_psci_call(struct kvm_vcpu *vcpu); | ||
26 | |||
27 | #endif /* __ARM_KVM_PSCI_H__ */ | ||
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index cf8bf6bf87c4..910bd8dabb3c 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <asm/kvm_emulate.h> | 21 | #include <asm/kvm_emulate.h> |
22 | #include <asm/kvm_coproc.h> | 22 | #include <asm/kvm_coproc.h> |
23 | #include <asm/kvm_mmu.h> | 23 | #include <asm/kvm_mmu.h> |
24 | #include <asm/kvm_psci.h> | 24 | #include <kvm/arm_psci.h> |
25 | #include <trace/events/kvm.h> | 25 | #include <trace/events/kvm.h> |
26 | 26 | ||
27 | #include "trace.h" | 27 | #include "trace.h" |
@@ -36,9 +36,9 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
36 | kvm_vcpu_hvc_get_imm(vcpu)); | 36 | kvm_vcpu_hvc_get_imm(vcpu)); |
37 | vcpu->stat.hvc_exit_stat++; | 37 | vcpu->stat.hvc_exit_stat++; |
38 | 38 | ||
39 | ret = kvm_psci_call(vcpu); | 39 | ret = kvm_hvc_call_handler(vcpu); |
40 | if (ret < 0) { | 40 | if (ret < 0) { |
41 | kvm_inject_undefined(vcpu); | 41 | vcpu_set_reg(vcpu, 0, ~0UL); |
42 | return 1; | 42 | return 1; |
43 | } | 43 | } |
44 | 44 | ||
@@ -47,7 +47,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
47 | 47 | ||
48 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | 48 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) |
49 | { | 49 | { |
50 | kvm_inject_undefined(vcpu); | 50 | /* |
51 | * "If an SMC instruction executed at Non-secure EL1 is | ||
52 | * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a | ||
53 | * Trap exception, not a Secure Monitor Call exception [...]" | ||
54 | * | ||
55 | * We need to advance the PC after the trap, as it would | ||
56 | * otherwise return to the same address... | ||
57 | */ | ||
58 | vcpu_set_reg(vcpu, 0, ~0UL); | ||
59 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
51 | return 1; | 60 | return 1; |
52 | } | 61 | } |
53 | 62 | ||
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 3873dd7b5a32..1241fb211293 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h | |||
@@ -116,6 +116,24 @@ | |||
116 | .endm | 116 | .endm |
117 | 117 | ||
118 | /* | 118 | /* |
119 | * Value prediction barrier | ||
120 | */ | ||
121 | .macro csdb | ||
122 | hint #20 | ||
123 | .endm | ||
124 | |||
125 | /* | ||
126 | * Sanitise a 64-bit bounded index wrt speculation, returning zero if out | ||
127 | * of bounds. | ||
128 | */ | ||
129 | .macro mask_nospec64, idx, limit, tmp | ||
130 | sub \tmp, \idx, \limit | ||
131 | bic \tmp, \tmp, \idx | ||
132 | and \idx, \idx, \tmp, asr #63 | ||
133 | csdb | ||
134 | .endm | ||
135 | |||
136 | /* | ||
119 | * NOP sequence | 137 | * NOP sequence |
120 | */ | 138 | */ |
121 | .macro nops, num | 139 | .macro nops, num |
@@ -514,7 +532,7 @@ alternative_endif | |||
514 | * phys: physical address, preserved | 532 | * phys: physical address, preserved |
515 | * ttbr: returns the TTBR value | 533 | * ttbr: returns the TTBR value |
516 | */ | 534 | */ |
517 | .macro phys_to_ttbr, phys, ttbr | 535 | .macro phys_to_ttbr, ttbr, phys |
518 | #ifdef CONFIG_ARM64_PA_BITS_52 | 536 | #ifdef CONFIG_ARM64_PA_BITS_52 |
519 | orr \ttbr, \phys, \phys, lsr #46 | 537 | orr \ttbr, \phys, \phys, lsr #46 |
520 | and \ttbr, \ttbr, #TTBR_BADDR_MASK_52 | 538 | and \ttbr, \ttbr, #TTBR_BADDR_MASK_52 |
@@ -523,6 +541,29 @@ alternative_endif | |||
523 | #endif | 541 | #endif |
524 | .endm | 542 | .endm |
525 | 543 | ||
544 | .macro phys_to_pte, pte, phys | ||
545 | #ifdef CONFIG_ARM64_PA_BITS_52 | ||
546 | /* | ||
547 | * We assume \phys is 64K aligned and this is guaranteed by only | ||
548 | * supporting this configuration with 64K pages. | ||
549 | */ | ||
550 | orr \pte, \phys, \phys, lsr #36 | ||
551 | and \pte, \pte, #PTE_ADDR_MASK | ||
552 | #else | ||
553 | mov \pte, \phys | ||
554 | #endif | ||
555 | .endm | ||
556 | |||
557 | .macro pte_to_phys, phys, pte | ||
558 | #ifdef CONFIG_ARM64_PA_BITS_52 | ||
559 | ubfiz \phys, \pte, #(48 - 16 - 12), #16 | ||
560 | bfxil \phys, \pte, #16, #32 | ||
561 | lsl \phys, \phys, #16 | ||
562 | #else | ||
563 | and \phys, \pte, #PTE_ADDR_MASK | ||
564 | #endif | ||
565 | .endm | ||
566 | |||
526 | /** | 567 | /** |
527 | * Errata workaround prior to disable MMU. Insert an ISB immediately prior | 568 | * Errata workaround prior to disable MMU. Insert an ISB immediately prior |
528 | * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0. | 569 | * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0. |
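[Editor's note] mask_nospec64 above is the assembly counterpart of the C array_index_mask_nospec() added to barrier.h below; the entry code uses it to clamp the syscall number before indexing the syscall table. A hedged C restatement of the three-instruction sequence follows (the function name is invented; the arithmetic right shift mirrors the "asr #63").

    /* Returns idx when 0 <= idx < limit (and idx < 2^63), 0 otherwise - branch-free. */
    static inline unsigned long mask_nospec64_sketch(unsigned long idx,
                                                     unsigned long limit)
    {
            unsigned long tmp = idx - limit;     /* top bit set only if idx < limit */

            tmp &= ~idx;                         /* ...and idx's own top bit is clear */
            return idx & (unsigned long)((long)tmp >> 63);  /* all-ones or zero mask */
    }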
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 77651c49ef44..f11518af96a9 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h | |||
@@ -32,6 +32,7 @@ | |||
32 | #define dsb(opt) asm volatile("dsb " #opt : : : "memory") | 32 | #define dsb(opt) asm volatile("dsb " #opt : : : "memory") |
33 | 33 | ||
34 | #define psb_csync() asm volatile("hint #17" : : : "memory") | 34 | #define psb_csync() asm volatile("hint #17" : : : "memory") |
35 | #define csdb() asm volatile("hint #20" : : : "memory") | ||
35 | 36 | ||
36 | #define mb() dsb(sy) | 37 | #define mb() dsb(sy) |
37 | #define rmb() dsb(ld) | 38 | #define rmb() dsb(ld) |
@@ -40,6 +41,27 @@ | |||
40 | #define dma_rmb() dmb(oshld) | 41 | #define dma_rmb() dmb(oshld) |
41 | #define dma_wmb() dmb(oshst) | 42 | #define dma_wmb() dmb(oshst) |
42 | 43 | ||
44 | /* | ||
45 | * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz | ||
46 | * and 0 otherwise. | ||
47 | */ | ||
48 | #define array_index_mask_nospec array_index_mask_nospec | ||
49 | static inline unsigned long array_index_mask_nospec(unsigned long idx, | ||
50 | unsigned long sz) | ||
51 | { | ||
52 | unsigned long mask; | ||
53 | |||
54 | asm volatile( | ||
55 | " cmp %1, %2\n" | ||
56 | " sbc %0, xzr, xzr\n" | ||
57 | : "=r" (mask) | ||
58 | : "r" (idx), "Ir" (sz) | ||
59 | : "cc"); | ||
60 | |||
61 | csdb(); | ||
62 | return mask; | ||
63 | } | ||
64 | |||
43 | #define __smp_mb() dmb(ish) | 65 | #define __smp_mb() dmb(ish) |
44 | #define __smp_rmb() dmb(ishld) | 66 | #define __smp_rmb() dmb(ishld) |
45 | #define __smp_wmb() dmb(ishst) | 67 | #define __smp_wmb() dmb(ishst) |
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 5bb2fd4674e7..07fe2479d310 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h | |||
@@ -48,9 +48,10 @@ do { \ | |||
48 | } while (0) | 48 | } while (0) |
49 | 49 | ||
50 | static inline int | 50 | static inline int |
51 | arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) | 51 | arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) |
52 | { | 52 | { |
53 | int oldval = 0, ret, tmp; | 53 | int oldval = 0, ret, tmp; |
54 | u32 __user *uaddr = __uaccess_mask_ptr(_uaddr); | ||
54 | 55 | ||
55 | pagefault_disable(); | 56 | pagefault_disable(); |
56 | 57 | ||
@@ -88,15 +89,17 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) | |||
88 | } | 89 | } |
89 | 90 | ||
90 | static inline int | 91 | static inline int |
91 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | 92 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, |
92 | u32 oldval, u32 newval) | 93 | u32 oldval, u32 newval) |
93 | { | 94 | { |
94 | int ret = 0; | 95 | int ret = 0; |
95 | u32 val, tmp; | 96 | u32 val, tmp; |
97 | u32 __user *uaddr; | ||
96 | 98 | ||
97 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) | 99 | if (!access_ok(VERIFY_WRITE, _uaddr, sizeof(u32))) |
98 | return -EFAULT; | 100 | return -EFAULT; |
99 | 101 | ||
102 | uaddr = __uaccess_mask_ptr(_uaddr); | ||
100 | uaccess_enable(); | 103 | uaccess_enable(); |
101 | asm volatile("// futex_atomic_cmpxchg_inatomic\n" | 104 | asm volatile("// futex_atomic_cmpxchg_inatomic\n" |
102 | " prfm pstl1strm, %2\n" | 105 | " prfm pstl1strm, %2\n" |
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h index 82386e860dd2..a780f6714b44 100644 --- a/arch/arm64/include/asm/kernel-pgtable.h +++ b/arch/arm64/include/asm/kernel-pgtable.h | |||
@@ -123,16 +123,8 @@ | |||
123 | /* | 123 | /* |
124 | * Initial memory map attributes. | 124 | * Initial memory map attributes. |
125 | */ | 125 | */ |
126 | #define _SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) | 126 | #define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) |
127 | #define _SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) | 127 | #define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) |
128 | |||
129 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
130 | #define SWAPPER_PTE_FLAGS (_SWAPPER_PTE_FLAGS | PTE_NG) | ||
131 | #define SWAPPER_PMD_FLAGS (_SWAPPER_PMD_FLAGS | PMD_SECT_NG) | ||
132 | #else | ||
133 | #define SWAPPER_PTE_FLAGS _SWAPPER_PTE_FLAGS | ||
134 | #define SWAPPER_PMD_FLAGS _SWAPPER_PMD_FLAGS | ||
135 | #endif | ||
136 | 128 | ||
137 | #if ARM64_SWAPPER_USES_SECTION_MAPS | 129 | #if ARM64_SWAPPER_USES_SECTION_MAPS |
138 | #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) | 130 | #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS) |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 4485ae8e98de..a73f63aca68e 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -415,4 +415,10 @@ static inline void kvm_arm_vhe_guest_exit(void) | |||
415 | { | 415 | { |
416 | local_daif_restore(DAIF_PROCCTX_NOIRQ); | 416 | local_daif_restore(DAIF_PROCCTX_NOIRQ); |
417 | } | 417 | } |
418 | |||
419 | static inline bool kvm_arm_harden_branch_predictor(void) | ||
420 | { | ||
421 | return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR); | ||
422 | } | ||
423 | |||
418 | #endif /* __ARM64_KVM_HOST_H__ */ | 424 | #endif /* __ARM64_KVM_HOST_H__ */ |
diff --git a/arch/arm64/include/asm/kvm_psci.h b/arch/arm64/include/asm/kvm_psci.h deleted file mode 100644 index bc39e557c56c..000000000000 --- a/arch/arm64/include/asm/kvm_psci.h +++ /dev/null | |||
@@ -1,27 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __ARM64_KVM_PSCI_H__ | ||
19 | #define __ARM64_KVM_PSCI_H__ | ||
20 | |||
21 | #define KVM_ARM_PSCI_0_1 1 | ||
22 | #define KVM_ARM_PSCI_0_2 2 | ||
23 | |||
24 | int kvm_psci_version(struct kvm_vcpu *vcpu); | ||
25 | int kvm_psci_call(struct kvm_vcpu *vcpu); | ||
26 | |||
27 | #endif /* __ARM64_KVM_PSCI_H__ */ | ||
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h index 22a926825e3f..2db84df5eb42 100644 --- a/arch/arm64/include/asm/pgtable-prot.h +++ b/arch/arm64/include/asm/pgtable-prot.h | |||
@@ -37,13 +37,11 @@ | |||
37 | #define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) | 37 | #define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) |
38 | #define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) | 38 | #define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) |
39 | 39 | ||
40 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | 40 | #define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0) |
41 | #define PROT_DEFAULT (_PROT_DEFAULT | PTE_NG) | 41 | #define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0) |
42 | #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_SECT_NG) | 42 | |
43 | #else | 43 | #define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG) |
44 | #define PROT_DEFAULT _PROT_DEFAULT | 44 | #define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG) |
45 | #define PROT_SECT_DEFAULT _PROT_SECT_DEFAULT | ||
46 | #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */ | ||
47 | 45 | ||
48 | #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) | 46 | #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) |
49 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) | 47 | #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) |
@@ -55,22 +53,22 @@ | |||
55 | #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) | 53 | #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) |
56 | #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) | 54 | #define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) |
57 | 55 | ||
58 | #define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) | 56 | #define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL)) |
59 | #define _HYP_PAGE_DEFAULT (_PAGE_DEFAULT & ~PTE_NG) | 57 | #define _HYP_PAGE_DEFAULT _PAGE_DEFAULT |
60 | 58 | ||
61 | #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) | 59 | #define PAGE_KERNEL __pgprot(PROT_NORMAL) |
62 | #define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) | 60 | #define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY) |
63 | #define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY) | 61 | #define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY) |
64 | #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) | 62 | #define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN) |
65 | #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) | 63 | #define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT) |
66 | 64 | ||
67 | #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) | 65 | #define PAGE_HYP __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN) |
68 | #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) | 66 | #define PAGE_HYP_EXEC __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY) |
69 | #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) | 67 | #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN) |
70 | #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) | 68 | #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP) |
71 | 69 | ||
72 | #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) | 70 | #define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY) |
73 | #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) | 71 | #define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN) |
74 | 72 | ||
75 | #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) | 73 | #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN) |
76 | #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) | 74 | #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE) |
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 6db43ebd648d..fce604e3e599 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h | |||
@@ -21,6 +21,9 @@ | |||
21 | 21 | ||
22 | #define TASK_SIZE_64 (UL(1) << VA_BITS) | 22 | #define TASK_SIZE_64 (UL(1) << VA_BITS) |
23 | 23 | ||
24 | #define KERNEL_DS UL(-1) | ||
25 | #define USER_DS (TASK_SIZE_64 - 1) | ||
26 | |||
24 | #ifndef __ASSEMBLY__ | 27 | #ifndef __ASSEMBLY__ |
25 | 28 | ||
26 | /* | 29 | /* |
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index fdb827c7832f..ebdae15d665d 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h | |||
@@ -87,8 +87,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
87 | " cbnz %w1, 1f\n" | 87 | " cbnz %w1, 1f\n" |
88 | " add %w1, %w0, %3\n" | 88 | " add %w1, %w0, %3\n" |
89 | " casa %w0, %w1, %2\n" | 89 | " casa %w0, %w1, %2\n" |
90 | " and %w1, %w1, #0xffff\n" | 90 | " sub %w1, %w1, %3\n" |
91 | " eor %w1, %w1, %w0, lsr #16\n" | 91 | " eor %w1, %w1, %w0\n" |
92 | "1:") | 92 | "1:") |
93 | : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) | 93 | : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) |
94 | : "I" (1 << TICKET_SHIFT) | 94 | : "I" (1 << TICKET_SHIFT) |
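[Editor's note] The spinlock.h hunk above is the "theoretical trylock bug" from the merge text: the old sequence compared only 16 bits of the value returned by CASA, so a failed compare-and-swap could in principle be reported as success. The fix reconstructs the expected old lock word and compares all 32 bits. Roughly, in C (hedged sketch with made-up names; the real check is the inline asm):

    /*
     * CASA returns the value it observed in memory; the store only took
     * effect if that equals the lock word we originally read, i.e. the
     * incremented ticket minus 1 << TICKET_SHIFT.
     */
    static inline bool trylock_cas_succeeded(u32 newval, u32 observed,
                                             u32 ticket_inc)
    {
            return (newval - ticket_inc) == observed;
    }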
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 59fda5292936..543e11f0f657 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h | |||
@@ -35,16 +35,20 @@ | |||
35 | #include <asm/compiler.h> | 35 | #include <asm/compiler.h> |
36 | #include <asm/extable.h> | 36 | #include <asm/extable.h> |
37 | 37 | ||
38 | #define KERNEL_DS (-1UL) | ||
39 | #define get_ds() (KERNEL_DS) | 38 | #define get_ds() (KERNEL_DS) |
40 | |||
41 | #define USER_DS TASK_SIZE_64 | ||
42 | #define get_fs() (current_thread_info()->addr_limit) | 39 | #define get_fs() (current_thread_info()->addr_limit) |
43 | 40 | ||
44 | static inline void set_fs(mm_segment_t fs) | 41 | static inline void set_fs(mm_segment_t fs) |
45 | { | 42 | { |
46 | current_thread_info()->addr_limit = fs; | 43 | current_thread_info()->addr_limit = fs; |
47 | 44 | ||
45 | /* | ||
46 | * Prevent a mispredicted conditional call to set_fs from forwarding | ||
47 | * the wrong address limit to access_ok under speculation. | ||
48 | */ | ||
49 | dsb(nsh); | ||
50 | isb(); | ||
51 | |||
48 | /* On user-mode return, check fs is correct */ | 52 | /* On user-mode return, check fs is correct */ |
49 | set_thread_flag(TIF_FSCHECK); | 53 | set_thread_flag(TIF_FSCHECK); |
50 | 54 | ||
@@ -66,22 +70,32 @@ static inline void set_fs(mm_segment_t fs) | |||
66 | * Returns 1 if the range is valid, 0 otherwise. | 70 | * Returns 1 if the range is valid, 0 otherwise. |
67 | * | 71 | * |
68 | * This is equivalent to the following test: | 72 | * This is equivalent to the following test: |
69 | * (u65)addr + (u65)size <= current->addr_limit | 73 | * (u65)addr + (u65)size <= (u65)current->addr_limit + 1 |
70 | * | ||
71 | * This needs 65-bit arithmetic. | ||
72 | */ | 74 | */ |
73 | #define __range_ok(addr, size) \ | 75 | static inline unsigned long __range_ok(unsigned long addr, unsigned long size) |
74 | ({ \ | 76 | { |
75 | unsigned long __addr = (unsigned long)(addr); \ | 77 | unsigned long limit = current_thread_info()->addr_limit; |
76 | unsigned long flag, roksum; \ | 78 | |
77 | __chk_user_ptr(addr); \ | 79 | __chk_user_ptr(addr); |
78 | asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \ | 80 | asm volatile( |
79 | : "=&r" (flag), "=&r" (roksum) \ | 81 | // A + B <= C + 1 for all A,B,C, in four easy steps: |
80 | : "1" (__addr), "Ir" (size), \ | 82 | // 1: X = A + B; X' = X % 2^64 |
81 | "r" (current_thread_info()->addr_limit) \ | 83 | " adds %0, %0, %2\n" |
82 | : "cc"); \ | 84 | // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4 |
83 | flag; \ | 85 | " csel %1, xzr, %1, hi\n" |
84 | }) | 86 | // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X' |
87 | // to compensate for the carry flag being set in step 4. For | ||
88 | // X > 2^64, X' merely has to remain nonzero, which it does. | ||
89 | " csinv %0, %0, xzr, cc\n" | ||
90 | // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1 | ||
91 | // comes from the carry in being clear. Otherwise, we are | ||
92 | // testing X' - C == 0, subject to the previous adjustments. | ||
93 | " sbcs xzr, %0, %1\n" | ||
94 | " cset %0, ls\n" | ||
95 | : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc"); | ||
96 | |||
97 | return addr; | ||
98 | } | ||
85 | 99 | ||
86 | /* | 100 | /* |
87 | * When dealing with data aborts, watchpoints, or instruction traps we may end | 101 | * When dealing with data aborts, watchpoints, or instruction traps we may end |
@@ -90,7 +104,7 @@ static inline void set_fs(mm_segment_t fs) | |||
90 | */ | 104 | */ |
91 | #define untagged_addr(addr) sign_extend64(addr, 55) | 105 | #define untagged_addr(addr) sign_extend64(addr, 55) |
92 | 106 | ||
93 | #define access_ok(type, addr, size) __range_ok(addr, size) | 107 | #define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size) |
94 | #define user_addr_max get_fs | 108 | #define user_addr_max get_fs |
95 | 109 | ||
96 | #define _ASM_EXTABLE(from, to) \ | 110 | #define _ASM_EXTABLE(from, to) \ |
@@ -221,6 +235,26 @@ static inline void uaccess_enable_not_uao(void) | |||
221 | } | 235 | } |
222 | 236 | ||
223 | /* | 237 | /* |
238 | * Sanitise a uaccess pointer such that it becomes NULL if above the | ||
239 | * current addr_limit. | ||
240 | */ | ||
241 | #define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) | ||
242 | static inline void __user *__uaccess_mask_ptr(const void __user *ptr) | ||
243 | { | ||
244 | void __user *safe_ptr; | ||
245 | |||
246 | asm volatile( | ||
247 | " bics xzr, %1, %2\n" | ||
248 | " csel %0, %1, xzr, eq\n" | ||
249 | : "=&r" (safe_ptr) | ||
250 | : "r" (ptr), "r" (current_thread_info()->addr_limit) | ||
251 | : "cc"); | ||
252 | |||
253 | csdb(); | ||
254 | return safe_ptr; | ||
255 | } | ||
256 | |||
257 | /* | ||
224 | * The "__xxx" versions of the user access functions do not verify the address | 258 | * The "__xxx" versions of the user access functions do not verify the address |
225 | * space - it must have been done previously with a separate "access_ok()" | 259 | * space - it must have been done previously with a separate "access_ok()" |
226 | * call. | 260 | * call. |
@@ -272,28 +306,33 @@ do { \ | |||
272 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ | 306 | (x) = (__force __typeof__(*(ptr)))__gu_val; \ |
273 | } while (0) | 307 | } while (0) |
274 | 308 | ||
275 | #define __get_user(x, ptr) \ | 309 | #define __get_user_check(x, ptr, err) \ |
276 | ({ \ | 310 | ({ \ |
277 | int __gu_err = 0; \ | 311 | __typeof__(*(ptr)) __user *__p = (ptr); \ |
278 | __get_user_err((x), (ptr), __gu_err); \ | 312 | might_fault(); \ |
279 | __gu_err; \ | 313 | if (access_ok(VERIFY_READ, __p, sizeof(*__p))) { \ |
314 | __p = uaccess_mask_ptr(__p); \ | ||
315 | __get_user_err((x), __p, (err)); \ | ||
316 | } else { \ | ||
317 | (x) = 0; (err) = -EFAULT; \ | ||
318 | } \ | ||
280 | }) | 319 | }) |
281 | 320 | ||
282 | #define __get_user_error(x, ptr, err) \ | 321 | #define __get_user_error(x, ptr, err) \ |
283 | ({ \ | 322 | ({ \ |
284 | __get_user_err((x), (ptr), (err)); \ | 323 | __get_user_check((x), (ptr), (err)); \ |
285 | (void)0; \ | 324 | (void)0; \ |
286 | }) | 325 | }) |
287 | 326 | ||
288 | #define get_user(x, ptr) \ | 327 | #define __get_user(x, ptr) \ |
289 | ({ \ | 328 | ({ \ |
290 | __typeof__(*(ptr)) __user *__p = (ptr); \ | 329 | int __gu_err = 0; \ |
291 | might_fault(); \ | 330 | __get_user_check((x), (ptr), __gu_err); \ |
292 | access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \ | 331 | __gu_err; \ |
293 | __get_user((x), __p) : \ | ||
294 | ((x) = 0, -EFAULT); \ | ||
295 | }) | 332 | }) |
296 | 333 | ||
334 | #define get_user __get_user | ||
335 | |||
297 | #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ | 336 | #define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature) \ |
298 | asm volatile( \ | 337 | asm volatile( \ |
299 | "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ | 338 | "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ |
@@ -336,43 +375,63 @@ do { \ | |||
336 | uaccess_disable_not_uao(); \ | 375 | uaccess_disable_not_uao(); \ |
337 | } while (0) | 376 | } while (0) |
338 | 377 | ||
339 | #define __put_user(x, ptr) \ | 378 | #define __put_user_check(x, ptr, err) \ |
340 | ({ \ | 379 | ({ \ |
341 | int __pu_err = 0; \ | 380 | __typeof__(*(ptr)) __user *__p = (ptr); \ |
342 | __put_user_err((x), (ptr), __pu_err); \ | 381 | might_fault(); \ |
343 | __pu_err; \ | 382 | if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) { \ |
383 | __p = uaccess_mask_ptr(__p); \ | ||
384 | __put_user_err((x), __p, (err)); \ | ||
385 | } else { \ | ||
386 | (err) = -EFAULT; \ | ||
387 | } \ | ||
344 | }) | 388 | }) |
345 | 389 | ||
346 | #define __put_user_error(x, ptr, err) \ | 390 | #define __put_user_error(x, ptr, err) \ |
347 | ({ \ | 391 | ({ \ |
348 | __put_user_err((x), (ptr), (err)); \ | 392 | __put_user_check((x), (ptr), (err)); \ |
349 | (void)0; \ | 393 | (void)0; \ |
350 | }) | 394 | }) |
351 | 395 | ||
352 | #define put_user(x, ptr) \ | 396 | #define __put_user(x, ptr) \ |
353 | ({ \ | 397 | ({ \ |
354 | __typeof__(*(ptr)) __user *__p = (ptr); \ | 398 | int __pu_err = 0; \ |
355 | might_fault(); \ | 399 | __put_user_check((x), (ptr), __pu_err); \ |
356 | access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \ | 400 | __pu_err; \ |
357 | __put_user((x), __p) : \ | ||
358 | -EFAULT; \ | ||
359 | }) | 401 | }) |
360 | 402 | ||
403 | #define put_user __put_user | ||
404 | |||
361 | extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); | 405 | extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); |
362 | #define raw_copy_from_user __arch_copy_from_user | 406 | #define raw_copy_from_user(to, from, n) \ |
407 | ({ \ | ||
408 | __arch_copy_from_user((to), __uaccess_mask_ptr(from), (n)); \ | ||
409 | }) | ||
410 | |||
363 | extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); | 411 | extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); |
364 | #define raw_copy_to_user __arch_copy_to_user | 412 | #define raw_copy_to_user(to, from, n) \ |
365 | extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); | 413 | ({ \ |
366 | extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); | 414 | __arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n)); \ |
415 | }) | ||
416 | |||
417 | extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n); | ||
418 | #define raw_copy_in_user(to, from, n) \ | ||
419 | ({ \ | ||
420 | __arch_copy_in_user(__uaccess_mask_ptr(to), \ | ||
421 | __uaccess_mask_ptr(from), (n)); \ | ||
422 | }) | ||
423 | |||
367 | #define INLINE_COPY_TO_USER | 424 | #define INLINE_COPY_TO_USER |
368 | #define INLINE_COPY_FROM_USER | 425 | #define INLINE_COPY_FROM_USER |
369 | 426 | ||
370 | static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) | 427 | extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n); |
428 | static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n) | ||
371 | { | 429 | { |
372 | if (access_ok(VERIFY_WRITE, to, n)) | 430 | if (access_ok(VERIFY_WRITE, to, n)) |
373 | n = __clear_user(to, n); | 431 | n = __arch_clear_user(__uaccess_mask_ptr(to), n); |
374 | return n; | 432 | return n; |
375 | } | 433 | } |
434 | #define clear_user __clear_user | ||
376 | 435 | ||
377 | extern long strncpy_from_user(char *dest, const char __user *src, long count); | 436 | extern long strncpy_from_user(char *dest, const char __user *src, long count); |
378 | 437 | ||
@@ -386,7 +445,7 @@ extern unsigned long __must_check __copy_user_flushcache(void *to, const void __ | |||
386 | static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) | 445 | static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) |
387 | { | 446 | { |
388 | kasan_check_write(dst, size); | 447 | kasan_check_write(dst, size); |
389 | return __copy_user_flushcache(dst, src, size); | 448 | return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size); |
390 | } | 449 | } |
391 | #endif | 450 | #endif |
392 | 451 | ||
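[Editor's note] The uaccess changes above all funnel through __uaccess_mask_ptr(). An illustrative C equivalent of what the asm computes is sketched below; the real code uses BICS/CSEL plus a csdb() barrier so the result cannot be consumed under value speculation, and the function name here is invented.

    static inline void __user *mask_ptr_sketch(void __user *ptr)
    {
            unsigned long limit = current_thread_info()->addr_limit;

            /* Any address bit above addr_limit forces the pointer to NULL. */
            return ((unsigned long)ptr & ~limit) ? NULL : ptr;
    }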
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index 67368c7329c0..66be504edb6c 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c | |||
@@ -37,8 +37,8 @@ EXPORT_SYMBOL(clear_page); | |||
37 | /* user mem (segment) */ | 37 | /* user mem (segment) */ |
38 | EXPORT_SYMBOL(__arch_copy_from_user); | 38 | EXPORT_SYMBOL(__arch_copy_from_user); |
39 | EXPORT_SYMBOL(__arch_copy_to_user); | 39 | EXPORT_SYMBOL(__arch_copy_to_user); |
40 | EXPORT_SYMBOL(__clear_user); | 40 | EXPORT_SYMBOL(__arch_clear_user); |
41 | EXPORT_SYMBOL(raw_copy_in_user); | 41 | EXPORT_SYMBOL(__arch_copy_in_user); |
42 | 42 | ||
43 | /* physical memory */ | 43 | /* physical memory */ |
44 | EXPORT_SYMBOL(memstart_addr); | 44 | EXPORT_SYMBOL(memstart_addr); |
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S index 76225c2611ea..e5de33513b5d 100644 --- a/arch/arm64/kernel/bpi.S +++ b/arch/arm64/kernel/bpi.S | |||
@@ -17,6 +17,7 @@ | |||
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/linkage.h> | 19 | #include <linux/linkage.h> |
20 | #include <linux/arm-smccc.h> | ||
20 | 21 | ||
21 | .macro ventry target | 22 | .macro ventry target |
22 | .rept 31 | 23 | .rept 31 |
@@ -53,30 +54,6 @@ ENTRY(__bp_harden_hyp_vecs_start) | |||
53 | vectors __kvm_hyp_vector | 54 | vectors __kvm_hyp_vector |
54 | .endr | 55 | .endr |
55 | ENTRY(__bp_harden_hyp_vecs_end) | 56 | ENTRY(__bp_harden_hyp_vecs_end) |
56 | ENTRY(__psci_hyp_bp_inval_start) | ||
57 | sub sp, sp, #(8 * 18) | ||
58 | stp x16, x17, [sp, #(16 * 0)] | ||
59 | stp x14, x15, [sp, #(16 * 1)] | ||
60 | stp x12, x13, [sp, #(16 * 2)] | ||
61 | stp x10, x11, [sp, #(16 * 3)] | ||
62 | stp x8, x9, [sp, #(16 * 4)] | ||
63 | stp x6, x7, [sp, #(16 * 5)] | ||
64 | stp x4, x5, [sp, #(16 * 6)] | ||
65 | stp x2, x3, [sp, #(16 * 7)] | ||
66 | stp x0, x1, [sp, #(16 * 8)] | ||
67 | mov x0, #0x84000000 | ||
68 | smc #0 | ||
69 | ldp x16, x17, [sp, #(16 * 0)] | ||
70 | ldp x14, x15, [sp, #(16 * 1)] | ||
71 | ldp x12, x13, [sp, #(16 * 2)] | ||
72 | ldp x10, x11, [sp, #(16 * 3)] | ||
73 | ldp x8, x9, [sp, #(16 * 4)] | ||
74 | ldp x6, x7, [sp, #(16 * 5)] | ||
75 | ldp x4, x5, [sp, #(16 * 6)] | ||
76 | ldp x2, x3, [sp, #(16 * 7)] | ||
77 | ldp x0, x1, [sp, #(16 * 8)] | ||
78 | add sp, sp, #(8 * 18) | ||
79 | ENTRY(__psci_hyp_bp_inval_end) | ||
80 | 57 | ||
81 | ENTRY(__qcom_hyp_sanitize_link_stack_start) | 58 | ENTRY(__qcom_hyp_sanitize_link_stack_start) |
82 | stp x29, x30, [sp, #-16]! | 59 | stp x29, x30, [sp, #-16]! |
@@ -85,3 +62,22 @@ ENTRY(__qcom_hyp_sanitize_link_stack_start) | |||
85 | .endr | 62 | .endr |
86 | ldp x29, x30, [sp], #16 | 63 | ldp x29, x30, [sp], #16 |
87 | ENTRY(__qcom_hyp_sanitize_link_stack_end) | 64 | ENTRY(__qcom_hyp_sanitize_link_stack_end) |
65 | |||
66 | .macro smccc_workaround_1 inst | ||
67 | sub sp, sp, #(8 * 4) | ||
68 | stp x2, x3, [sp, #(8 * 0)] | ||
69 | stp x0, x1, [sp, #(8 * 2)] | ||
70 | mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1 | ||
71 | \inst #0 | ||
72 | ldp x2, x3, [sp, #(8 * 0)] | ||
73 | ldp x0, x1, [sp, #(8 * 2)] | ||
74 | add sp, sp, #(8 * 4) | ||
75 | .endm | ||
76 | |||
77 | ENTRY(__smccc_workaround_1_smc_start) | ||
78 | smccc_workaround_1 smc | ||
79 | ENTRY(__smccc_workaround_1_smc_end) | ||
80 | |||
81 | ENTRY(__smccc_workaround_1_hvc_start) | ||
82 | smccc_workaround_1 hvc | ||
83 | ENTRY(__smccc_workaround_1_hvc_end) | ||
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 2a752cb2a0f3..8021b46c9743 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <asm/virt.h> | 16 | #include <asm/virt.h> |
17 | 17 | ||
18 | .text | 18 | .text |
19 | .pushsection .idmap.text, "ax" | 19 | .pushsection .idmap.text, "awx" |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for | 22 | * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for |
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index ed6881882231..07823595b7f0 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c | |||
@@ -67,9 +67,12 @@ static int cpu_enable_trap_ctr_access(void *__unused) | |||
67 | DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); | 67 | DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data); |
68 | 68 | ||
69 | #ifdef CONFIG_KVM | 69 | #ifdef CONFIG_KVM |
70 | extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[]; | ||
71 | extern char __qcom_hyp_sanitize_link_stack_start[]; | 70 | extern char __qcom_hyp_sanitize_link_stack_start[]; |
72 | extern char __qcom_hyp_sanitize_link_stack_end[]; | 71 | extern char __qcom_hyp_sanitize_link_stack_end[]; |
72 | extern char __smccc_workaround_1_smc_start[]; | ||
73 | extern char __smccc_workaround_1_smc_end[]; | ||
74 | extern char __smccc_workaround_1_hvc_start[]; | ||
75 | extern char __smccc_workaround_1_hvc_end[]; | ||
73 | 76 | ||
74 | static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, | 77 | static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start, |
75 | const char *hyp_vecs_end) | 78 | const char *hyp_vecs_end) |
@@ -112,10 +115,12 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn, | |||
112 | spin_unlock(&bp_lock); | 115 | spin_unlock(&bp_lock); |
113 | } | 116 | } |
114 | #else | 117 | #else |
115 | #define __psci_hyp_bp_inval_start NULL | ||
116 | #define __psci_hyp_bp_inval_end NULL | ||
117 | #define __qcom_hyp_sanitize_link_stack_start NULL | 118 | #define __qcom_hyp_sanitize_link_stack_start NULL |
118 | #define __qcom_hyp_sanitize_link_stack_end NULL | 119 | #define __qcom_hyp_sanitize_link_stack_end NULL |
120 | #define __smccc_workaround_1_smc_start NULL | ||
121 | #define __smccc_workaround_1_smc_end NULL | ||
122 | #define __smccc_workaround_1_hvc_start NULL | ||
123 | #define __smccc_workaround_1_hvc_end NULL | ||
119 | 124 | ||
120 | static void __install_bp_hardening_cb(bp_hardening_cb_t fn, | 125 | static void __install_bp_hardening_cb(bp_hardening_cb_t fn, |
121 | const char *hyp_vecs_start, | 126 | const char *hyp_vecs_start, |
@@ -142,17 +147,59 @@ static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry, | |||
142 | __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); | 147 | __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end); |
143 | } | 148 | } |
144 | 149 | ||
150 | #include <uapi/linux/psci.h> | ||
151 | #include <linux/arm-smccc.h> | ||
145 | #include <linux/psci.h> | 152 | #include <linux/psci.h> |
146 | 153 | ||
147 | static int enable_psci_bp_hardening(void *data) | 154 | static void call_smc_arch_workaround_1(void) |
155 | { | ||
156 | arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); | ||
157 | } | ||
158 | |||
159 | static void call_hvc_arch_workaround_1(void) | ||
160 | { | ||
161 | arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL); | ||
162 | } | ||
163 | |||
164 | static int enable_smccc_arch_workaround_1(void *data) | ||
148 | { | 165 | { |
149 | const struct arm64_cpu_capabilities *entry = data; | 166 | const struct arm64_cpu_capabilities *entry = data; |
167 | bp_hardening_cb_t cb; | ||
168 | void *smccc_start, *smccc_end; | ||
169 | struct arm_smccc_res res; | ||
170 | |||
171 | if (!entry->matches(entry, SCOPE_LOCAL_CPU)) | ||
172 | return 0; | ||
173 | |||
174 | if (psci_ops.smccc_version == SMCCC_VERSION_1_0) | ||
175 | return 0; | ||
176 | |||
177 | switch (psci_ops.conduit) { | ||
178 | case PSCI_CONDUIT_HVC: | ||
179 | arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, | ||
180 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); | ||
181 | if (res.a0) | ||
182 | return 0; | ||
183 | cb = call_hvc_arch_workaround_1; | ||
184 | smccc_start = __smccc_workaround_1_hvc_start; | ||
185 | smccc_end = __smccc_workaround_1_hvc_end; | ||
186 | break; | ||
187 | |||
188 | case PSCI_CONDUIT_SMC: | ||
189 | arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, | ||
190 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); | ||
191 | if (res.a0) | ||
192 | return 0; | ||
193 | cb = call_smc_arch_workaround_1; | ||
194 | smccc_start = __smccc_workaround_1_smc_start; | ||
195 | smccc_end = __smccc_workaround_1_smc_end; | ||
196 | break; | ||
197 | |||
198 | default: | ||
199 | return 0; | ||
200 | } | ||
150 | 201 | ||
151 | if (psci_ops.get_version) | 202 | install_bp_hardening_cb(entry, cb, smccc_start, smccc_end); |
152 | install_bp_hardening_cb(entry, | ||
153 | (bp_hardening_cb_t)psci_ops.get_version, | ||
154 | __psci_hyp_bp_inval_start, | ||
155 | __psci_hyp_bp_inval_end); | ||
156 | 203 | ||
157 | return 0; | 204 | return 0; |
158 | } | 205 | } |
@@ -333,22 +380,22 @@ const struct arm64_cpu_capabilities arm64_errata[] = { | |||
333 | { | 380 | { |
334 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, | 381 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
335 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), | 382 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), |
336 | .enable = enable_psci_bp_hardening, | 383 | .enable = enable_smccc_arch_workaround_1, |
337 | }, | 384 | }, |
338 | { | 385 | { |
339 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, | 386 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
340 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), | 387 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), |
341 | .enable = enable_psci_bp_hardening, | 388 | .enable = enable_smccc_arch_workaround_1, |
342 | }, | 389 | }, |
343 | { | 390 | { |
344 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, | 391 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
345 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), | 392 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), |
346 | .enable = enable_psci_bp_hardening, | 393 | .enable = enable_smccc_arch_workaround_1, |
347 | }, | 394 | }, |
348 | { | 395 | { |
349 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, | 396 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
350 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), | 397 | MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), |
351 | .enable = enable_psci_bp_hardening, | 398 | .enable = enable_smccc_arch_workaround_1, |
352 | }, | 399 | }, |
353 | { | 400 | { |
354 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, | 401 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
@@ -362,12 +409,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = { | |||
362 | { | 409 | { |
363 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, | 410 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
364 | MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), | 411 | MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN), |
365 | .enable = enable_psci_bp_hardening, | 412 | .enable = enable_smccc_arch_workaround_1, |
366 | }, | 413 | }, |
367 | { | 414 | { |
368 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, | 415 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, |
369 | MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), | 416 | MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2), |
370 | .enable = enable_psci_bp_hardening, | 417 | .enable = enable_smccc_arch_workaround_1, |
371 | }, | 418 | }, |
372 | #endif | 419 | #endif |
373 | { | 420 | { |
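[Editor's note] The detection flow added to cpu_errata.c above condenses to the sketch below (same kernel interfaces as the diff; the helper name is invented and error handling is trimmed). A CPU only gets the SMCCC-based mitigation when firmware implements SMCCC v1.1 or later over a known PSCI conduit and reports ARM_SMCCC_ARCH_WORKAROUND_1 as available.

    #include <linux/arm-smccc.h>
    #include <linux/psci.h>
    #include <uapi/linux/psci.h>

    static bool have_smccc_arch_workaround_1(void)
    {
            struct arm_smccc_res res;

            if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
                    return false;        /* SMCCC 1.0 has no ARCH_FEATURES probe */

            switch (psci_ops.conduit) {
            case PSCI_CONDUIT_HVC:
                    arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                      ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                    break;
            case PSCI_CONDUIT_SMC:
                    arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                      ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                    break;
            default:
                    return false;        /* unknown conduit */
            }

            /* As in the diff, any nonzero return is treated as "not supported". */
            return res.a0 == 0;
    }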
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 0fb6a3151443..29b1f873e337 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
@@ -856,12 +856,23 @@ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */ | |||
856 | static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, | 856 | static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, |
857 | int __unused) | 857 | int __unused) |
858 | { | 858 | { |
859 | char const *str = "command line option"; | ||
859 | u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); | 860 | u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); |
860 | 861 | ||
861 | /* Forced on command line? */ | 862 | /* |
863 | * For reasons that aren't entirely clear, enabling KPTI on Cavium | ||
864 | * ThunderX leads to apparent I-cache corruption of kernel text, which | ||
865 | * ends as well as you might imagine. Don't even try. | ||
866 | */ | ||
867 | if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) { | ||
868 | str = "ARM64_WORKAROUND_CAVIUM_27456"; | ||
869 | __kpti_forced = -1; | ||
870 | } | ||
871 | |||
872 | /* Forced? */ | ||
862 | if (__kpti_forced) { | 873 | if (__kpti_forced) { |
863 | pr_info_once("kernel page table isolation forced %s by command line option\n", | 874 | pr_info_once("kernel page table isolation forced %s by %s\n", |
864 | __kpti_forced > 0 ? "ON" : "OFF"); | 875 | __kpti_forced > 0 ? "ON" : "OFF", str); |
865 | return __kpti_forced > 0; | 876 | return __kpti_forced > 0; |
866 | } | 877 | } |
867 | 878 | ||
@@ -881,6 +892,30 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry, | |||
881 | ID_AA64PFR0_CSV3_SHIFT); | 892 | ID_AA64PFR0_CSV3_SHIFT); |
882 | } | 893 | } |
883 | 894 | ||
895 | static int kpti_install_ng_mappings(void *__unused) | ||
896 | { | ||
897 | typedef void (kpti_remap_fn)(int, int, phys_addr_t); | ||
898 | extern kpti_remap_fn idmap_kpti_install_ng_mappings; | ||
899 | kpti_remap_fn *remap_fn; | ||
900 | |||
901 | static bool kpti_applied = false; | ||
902 | int cpu = smp_processor_id(); | ||
903 | |||
904 | if (kpti_applied) | ||
905 | return 0; | ||
906 | |||
907 | remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings); | ||
908 | |||
909 | cpu_install_idmap(); | ||
910 | remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir)); | ||
911 | cpu_uninstall_idmap(); | ||
912 | |||
913 | if (!cpu) | ||
914 | kpti_applied = true; | ||
915 | |||
916 | return 0; | ||
917 | } | ||
918 | |||
884 | static int __init parse_kpti(char *str) | 919 | static int __init parse_kpti(char *str) |
885 | { | 920 | { |
886 | bool enabled; | 921 | bool enabled; |
@@ -1004,6 +1039,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { | |||
1004 | .capability = ARM64_UNMAP_KERNEL_AT_EL0, | 1039 | .capability = ARM64_UNMAP_KERNEL_AT_EL0, |
1005 | .def_scope = SCOPE_SYSTEM, | 1040 | .def_scope = SCOPE_SYSTEM, |
1006 | .matches = unmap_kernel_at_el0, | 1041 | .matches = unmap_kernel_at_el0, |
1042 | .enable = kpti_install_ng_mappings, | ||
1007 | }, | 1043 | }, |
1008 | #endif | 1044 | #endif |
1009 | { | 1045 | { |
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index cccd2788e631..ec2ee720e33e 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S | |||
@@ -167,10 +167,10 @@ alternative_else_nop_endif | |||
167 | .else | 167 | .else |
168 | add x21, sp, #S_FRAME_SIZE | 168 | add x21, sp, #S_FRAME_SIZE |
169 | get_thread_info tsk | 169 | get_thread_info tsk |
170 | /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */ | 170 | /* Save the task's original addr_limit and set USER_DS */ |
171 | ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] | 171 | ldr x20, [tsk, #TSK_TI_ADDR_LIMIT] |
172 | str x20, [sp, #S_ORIG_ADDR_LIMIT] | 172 | str x20, [sp, #S_ORIG_ADDR_LIMIT] |
173 | mov x20, #TASK_SIZE_64 | 173 | mov x20, #USER_DS |
174 | str x20, [tsk, #TSK_TI_ADDR_LIMIT] | 174 | str x20, [tsk, #TSK_TI_ADDR_LIMIT] |
175 | /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */ | 175 | /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */ |
176 | .endif /* \el == 0 */ | 176 | .endif /* \el == 0 */ |
@@ -382,6 +382,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0 | |||
382 | * x7 is reserved for the system call number in 32-bit mode. | 382 | * x7 is reserved for the system call number in 32-bit mode. |
383 | */ | 383 | */ |
384 | wsc_nr .req w25 // number of system calls | 384 | wsc_nr .req w25 // number of system calls |
385 | xsc_nr .req x25 // number of system calls (zero-extended) | ||
385 | wscno .req w26 // syscall number | 386 | wscno .req w26 // syscall number |
386 | xscno .req x26 // syscall number (zero-extended) | 387 | xscno .req x26 // syscall number (zero-extended) |
387 | stbl .req x27 // syscall table pointer | 388 | stbl .req x27 // syscall table pointer |
@@ -770,7 +771,10 @@ el0_sp_pc: | |||
770 | * Stack or PC alignment exception handling | 771 | * Stack or PC alignment exception handling |
771 | */ | 772 | */ |
772 | mrs x26, far_el1 | 773 | mrs x26, far_el1 |
773 | enable_daif | 774 | enable_da_f |
775 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
776 | bl trace_hardirqs_off | ||
777 | #endif | ||
774 | ct_user_exit | 778 | ct_user_exit |
775 | mov x0, x26 | 779 | mov x0, x26 |
776 | mov x1, x25 | 780 | mov x1, x25 |
@@ -828,6 +832,11 @@ el0_irq_naked: | |||
828 | #endif | 832 | #endif |
829 | 833 | ||
830 | ct_user_exit | 834 | ct_user_exit |
835 | #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR | ||
836 | tbz x22, #55, 1f | ||
837 | bl do_el0_irq_bp_hardening | ||
838 | 1: | ||
839 | #endif | ||
831 | irq_handler | 840 | irq_handler |
832 | 841 | ||
833 | #ifdef CONFIG_TRACE_IRQFLAGS | 842 | #ifdef CONFIG_TRACE_IRQFLAGS |
@@ -939,6 +948,7 @@ el0_svc_naked: // compat entry point | |||
939 | b.ne __sys_trace | 948 | b.ne __sys_trace |
940 | cmp wscno, wsc_nr // check upper syscall limit | 949 | cmp wscno, wsc_nr // check upper syscall limit |
941 | b.hs ni_sys | 950 | b.hs ni_sys |
951 | mask_nospec64 xscno, xsc_nr, x19 // enforce bounds for syscall number | ||
942 | ldr x16, [stbl, xscno, lsl #3] // address in the syscall table | 952 | ldr x16, [stbl, xscno, lsl #3] // address in the syscall table |
943 | blr x16 // call sys_* routine | 953 | blr x16 // call sys_* routine |
944 | b ret_fast_syscall | 954 | b ret_fast_syscall |
@@ -1017,16 +1027,9 @@ alternative_else_nop_endif | |||
1017 | orr \tmp, \tmp, #USER_ASID_FLAG | 1027 | orr \tmp, \tmp, #USER_ASID_FLAG |
1018 | msr ttbr1_el1, \tmp | 1028 | msr ttbr1_el1, \tmp |
1019 | /* | 1029 | /* |
1020 | * We avoid running the post_ttbr_update_workaround here because the | 1030 | * We avoid running the post_ttbr_update_workaround here because |
1021 | * user and kernel ASIDs don't have conflicting mappings, so any | 1031 | * it's only needed by Cavium ThunderX, which requires KPTI to be |
1022 | * "blessing" as described in: | 1032 | * disabled. |
1023 | * | ||
1024 | * http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com | ||
1025 | * | ||
1026 | * will not hurt correctness. Whilst this may partially defeat the | ||
1027 | * point of using split ASIDs in the first place, it avoids | ||
1028 | * the hit of invalidating the entire I-cache on every return to | ||
1029 | * userspace. | ||
1030 | */ | 1033 | */ |
1031 | .endm | 1034 | .endm |
1032 | 1035 | ||
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index ba3ab04788dc..2b6b8b24e5ab 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -148,26 +148,6 @@ preserve_boot_args: | |||
148 | ENDPROC(preserve_boot_args) | 148 | ENDPROC(preserve_boot_args) |
149 | 149 | ||
150 | /* | 150 | /* |
151 | * Macro to arrange a physical address in a page table entry, taking care of | ||
152 | * 52-bit addresses. | ||
153 | * | ||
154 | * Preserves: phys | ||
155 | * Returns: pte | ||
156 | */ | ||
157 | .macro phys_to_pte, phys, pte | ||
158 | #ifdef CONFIG_ARM64_PA_BITS_52 | ||
159 | /* | ||
160 | * We assume \phys is 64K aligned and this is guaranteed by only | ||
161 | * supporting this configuration with 64K pages. | ||
162 | */ | ||
163 | orr \pte, \phys, \phys, lsr #36 | ||
164 | and \pte, \pte, #PTE_ADDR_MASK | ||
165 | #else | ||
166 | mov \pte, \phys | ||
167 | #endif | ||
168 | .endm | ||
169 | |||
170 | /* | ||
171 | * Macro to create a table entry to the next page. | 151 | * Macro to create a table entry to the next page. |
172 | * | 152 | * |
173 | * tbl: page table address | 153 | * tbl: page table address |
@@ -181,7 +161,7 @@ ENDPROC(preserve_boot_args) | |||
181 | */ | 161 | */ |
182 | .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 | 162 | .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 |
183 | add \tmp1, \tbl, #PAGE_SIZE | 163 | add \tmp1, \tbl, #PAGE_SIZE |
184 | phys_to_pte \tmp1, \tmp2 | 164 | phys_to_pte \tmp2, \tmp1 |
185 | orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type | 165 | orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type |
186 | lsr \tmp1, \virt, #\shift | 166 | lsr \tmp1, \virt, #\shift |
187 | sub \ptrs, \ptrs, #1 | 167 | sub \ptrs, \ptrs, #1 |
@@ -207,7 +187,7 @@ ENDPROC(preserve_boot_args) | |||
207 | * Returns: rtbl | 187 | * Returns: rtbl |
208 | */ | 188 | */ |
209 | .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1 | 189 | .macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1 |
210 | .Lpe\@: phys_to_pte \rtbl, \tmp1 | 190 | .Lpe\@: phys_to_pte \tmp1, \rtbl |
211 | orr \tmp1, \tmp1, \flags // tmp1 = table entry | 191 | orr \tmp1, \tmp1, \flags // tmp1 = table entry |
212 | str \tmp1, [\tbl, \index, lsl #3] | 192 | str \tmp1, [\tbl, \index, lsl #3] |
213 | add \rtbl, \rtbl, \inc // rtbl = pa next level | 193 | add \rtbl, \rtbl, \inc // rtbl = pa next level |
@@ -475,7 +455,7 @@ ENDPROC(__primary_switched) | |||
475 | * end early head section, begin head code that is also used for | 455 | * end early head section, begin head code that is also used for |
476 | * hotplug and needs to have the same protections as the text region | 456 | * hotplug and needs to have the same protections as the text region |
477 | */ | 457 | */ |
478 | .section ".idmap.text","ax" | 458 | .section ".idmap.text","awx" |
479 | 459 | ||
480 | ENTRY(kimage_vaddr) | 460 | ENTRY(kimage_vaddr) |
481 | .quad _text - TEXT_OFFSET | 461 | .quad _text - TEXT_OFFSET |
@@ -776,8 +756,8 @@ ENTRY(__enable_mmu) | |||
776 | update_early_cpu_boot_status 0, x1, x2 | 756 | update_early_cpu_boot_status 0, x1, x2 |
777 | adrp x1, idmap_pg_dir | 757 | adrp x1, idmap_pg_dir |
778 | adrp x2, swapper_pg_dir | 758 | adrp x2, swapper_pg_dir |
779 | phys_to_ttbr x1, x3 | 759 | phys_to_ttbr x3, x1 |
780 | phys_to_ttbr x2, x4 | 760 | phys_to_ttbr x4, x2 |
781 | msr ttbr0_el1, x3 // load TTBR0 | 761 | msr ttbr0_el1, x3 // load TTBR0 |
782 | msr ttbr1_el1, x4 // load TTBR1 | 762 | msr ttbr1_el1, x4 // load TTBR1 |
783 | isb | 763 | isb |
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S index 84f5d52fddda..dd14ab8c9f72 100644 --- a/arch/arm64/kernel/hibernate-asm.S +++ b/arch/arm64/kernel/hibernate-asm.S | |||
@@ -34,12 +34,12 @@ | |||
34 | * each stage of the walk. | 34 | * each stage of the walk. |
35 | */ | 35 | */ |
36 | .macro break_before_make_ttbr_switch zero_page, page_table, tmp | 36 | .macro break_before_make_ttbr_switch zero_page, page_table, tmp |
37 | phys_to_ttbr \zero_page, \tmp | 37 | phys_to_ttbr \tmp, \zero_page |
38 | msr ttbr1_el1, \tmp | 38 | msr ttbr1_el1, \tmp |
39 | isb | 39 | isb |
40 | tlbi vmalle1 | 40 | tlbi vmalle1 |
41 | dsb nsh | 41 | dsb nsh |
42 | phys_to_ttbr \page_table, \tmp | 42 | phys_to_ttbr \tmp, \page_table |
43 | msr ttbr1_el1, \tmp | 43 | msr ttbr1_el1, \tmp |
44 | isb | 44 | isb |
45 | .endm | 45 | .endm |
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 10dd16d7902d..bebec8ef9372 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S | |||
@@ -96,7 +96,7 @@ ENTRY(__cpu_suspend_enter) | |||
96 | ret | 96 | ret |
97 | ENDPROC(__cpu_suspend_enter) | 97 | ENDPROC(__cpu_suspend_enter) |
98 | 98 | ||
99 | .pushsection ".idmap.text", "ax" | 99 | .pushsection ".idmap.text", "awx" |
100 | ENTRY(cpu_resume) | 100 | ENTRY(cpu_resume) |
101 | bl el2_setup // if in EL2 drop to EL1 cleanly | 101 | bl el2_setup // if in EL2 drop to EL1 cleanly |
102 | bl __cpu_setup | 102 | bl __cpu_setup |
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 520b0dad3c62..e5e741bfffe1 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c | |||
@@ -22,13 +22,14 @@ | |||
22 | #include <linux/kvm.h> | 22 | #include <linux/kvm.h> |
23 | #include <linux/kvm_host.h> | 23 | #include <linux/kvm_host.h> |
24 | 24 | ||
25 | #include <kvm/arm_psci.h> | ||
26 | |||
25 | #include <asm/esr.h> | 27 | #include <asm/esr.h> |
26 | #include <asm/exception.h> | 28 | #include <asm/exception.h> |
27 | #include <asm/kvm_asm.h> | 29 | #include <asm/kvm_asm.h> |
28 | #include <asm/kvm_coproc.h> | 30 | #include <asm/kvm_coproc.h> |
29 | #include <asm/kvm_emulate.h> | 31 | #include <asm/kvm_emulate.h> |
30 | #include <asm/kvm_mmu.h> | 32 | #include <asm/kvm_mmu.h> |
31 | #include <asm/kvm_psci.h> | ||
32 | #include <asm/debug-monitors.h> | 33 | #include <asm/debug-monitors.h> |
33 | #include <asm/traps.h> | 34 | #include <asm/traps.h> |
34 | 35 | ||
@@ -51,7 +52,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
51 | kvm_vcpu_hvc_get_imm(vcpu)); | 52 | kvm_vcpu_hvc_get_imm(vcpu)); |
52 | vcpu->stat.hvc_exit_stat++; | 53 | vcpu->stat.hvc_exit_stat++; |
53 | 54 | ||
54 | ret = kvm_psci_call(vcpu); | 55 | ret = kvm_hvc_call_handler(vcpu); |
55 | if (ret < 0) { | 56 | if (ret < 0) { |
56 | vcpu_set_reg(vcpu, 0, ~0UL); | 57 | vcpu_set_reg(vcpu, 0, ~0UL); |
57 | return 1; | 58 | return 1; |
@@ -62,7 +63,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
62 | 63 | ||
63 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) | 64 | static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) |
64 | { | 65 | { |
66 | /* | ||
67 | * "If an SMC instruction executed at Non-secure EL1 is | ||
68 | * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a | ||
69 | * Trap exception, not a Secure Monitor Call exception [...]" | ||
70 | * | ||
71 | * We need to advance the PC after the trap, as it would | ||
72 | * otherwise return to the same address... | ||
73 | */ | ||
65 | vcpu_set_reg(vcpu, 0, ~0UL); | 74 | vcpu_set_reg(vcpu, 0, ~0UL); |
75 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
66 | return 1; | 76 | return 1; |
67 | } | 77 | } |
68 | 78 | ||
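The comment above explains why handle_smc() must now skip the trapped instruction: the exception return address still points at the SMC itself. What kvm_skip_instr() boils down to is advancing the guest PC by the width of the trapped instruction; a simplified sketch follows (assumption: the real helper also performs AArch32 IT-state bookkeeping, omitted here):

    #include <linux/kvm_host.h>
    #include <asm/kvm_emulate.h>

    /*
     * Illustrative simplification only, not the kernel's implementation:
     * move the guest PC past the trapped instruction so it is not
     * re-executed on return. kvm_vcpu_trap_il_is32bit() reflects ESR_ELx.IL.
     */
    static void skip_trapped_instr(struct kvm_vcpu *vcpu)
    {
            *vcpu_pc(vcpu) += kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
    }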
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index e086c6eff8c6..5aa9ccf6db99 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S | |||
@@ -63,7 +63,7 @@ __do_hyp_init: | |||
63 | cmp x0, #HVC_STUB_HCALL_NR | 63 | cmp x0, #HVC_STUB_HCALL_NR |
64 | b.lo __kvm_handle_stub_hvc | 64 | b.lo __kvm_handle_stub_hvc |
65 | 65 | ||
66 | phys_to_ttbr x0, x4 | 66 | phys_to_ttbr x4, x0 |
67 | msr ttbr0_el2, x4 | 67 | msr ttbr0_el2, x4 |
68 | 68 | ||
69 | mrs x4, tcr_el1 | 69 | mrs x4, tcr_el1 |
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S index e4f37b9dd47c..f36464bd57c5 100644 --- a/arch/arm64/kvm/hyp/hyp-entry.S +++ b/arch/arm64/kvm/hyp/hyp-entry.S | |||
@@ -15,6 +15,7 @@ | |||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/arm-smccc.h> | ||
18 | #include <linux/linkage.h> | 19 | #include <linux/linkage.h> |
19 | 20 | ||
20 | #include <asm/alternative.h> | 21 | #include <asm/alternative.h> |
@@ -64,10 +65,11 @@ alternative_endif | |||
64 | lsr x0, x1, #ESR_ELx_EC_SHIFT | 65 | lsr x0, x1, #ESR_ELx_EC_SHIFT |
65 | 66 | ||
66 | cmp x0, #ESR_ELx_EC_HVC64 | 67 | cmp x0, #ESR_ELx_EC_HVC64 |
68 | ccmp x0, #ESR_ELx_EC_HVC32, #4, ne | ||
67 | b.ne el1_trap | 69 | b.ne el1_trap |
68 | 70 | ||
69 | mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest | 71 | mrs x1, vttbr_el2 // If vttbr is valid, the guest |
70 | cbnz x1, el1_trap // called HVC | 72 | cbnz x1, el1_hvc_guest // called HVC |
71 | 73 | ||
72 | /* Here, we're pretty sure the host called HVC. */ | 74 | /* Here, we're pretty sure the host called HVC. */ |
73 | ldp x0, x1, [sp], #16 | 75 | ldp x0, x1, [sp], #16 |
@@ -100,6 +102,20 @@ alternative_endif | |||
100 | 102 | ||
101 | eret | 103 | eret |
102 | 104 | ||
105 | el1_hvc_guest: | ||
106 | /* | ||
107 | * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1. | ||
108 | * The workaround has already been applied on the host, | ||
109 | * so let's quickly get back to the guest. We don't bother | ||
110 | * restoring x1, as it can be clobbered anyway. | ||
111 | */ | ||
112 | ldr x1, [sp] // Guest's x0 | ||
113 | eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1 | ||
114 | cbnz w1, el1_trap | ||
115 | mov x0, x1 | ||
116 | add sp, sp, #16 | ||
117 | eret | ||
118 | |||
103 | el1_trap: | 119 | el1_trap: |
104 | /* | 120 | /* |
105 | * x0: ESR_EC | 121 | * x0: ESR_EC |
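The el1_hvc_guest fast path above exists so that a guest applying its own branch-predictor hardening pays as little as possible per invalidation. On the guest side the whole operation is a single register-based hypercall using the v1.1 helper introduced later in this series; a minimal sketch (helper name is illustrative):

    #include <linux/arm-smccc.h>

    /*
     * Sketch: a guest on the HVC conduit invalidates its branch predictor.
     * x0 carries ARM_SMCCC_ARCH_WORKAROUND_1, and the handler above erets
     * straight back to the guest without taking the full exit path.
     */
    static inline void guest_bp_hardening_call(void)
    {
            arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
    }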
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 036e1f3d77a6..cac6a0500162 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c | |||
@@ -19,6 +19,8 @@ | |||
19 | #include <linux/jump_label.h> | 19 | #include <linux/jump_label.h> |
20 | #include <uapi/linux/psci.h> | 20 | #include <uapi/linux/psci.h> |
21 | 21 | ||
22 | #include <kvm/arm_psci.h> | ||
23 | |||
22 | #include <asm/kvm_asm.h> | 24 | #include <asm/kvm_asm.h> |
23 | #include <asm/kvm_emulate.h> | 25 | #include <asm/kvm_emulate.h> |
24 | #include <asm/kvm_hyp.h> | 26 | #include <asm/kvm_hyp.h> |
@@ -348,18 +350,6 @@ again: | |||
348 | if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu)) | 350 | if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu)) |
349 | goto again; | 351 | goto again; |
350 | 352 | ||
351 | if (exit_code == ARM_EXCEPTION_TRAP && | ||
352 | (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 || | ||
353 | kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) && | ||
354 | vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) { | ||
355 | u64 val = PSCI_RET_NOT_SUPPORTED; | ||
356 | if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) | ||
357 | val = 2; | ||
358 | |||
359 | vcpu_set_reg(vcpu, 0, val); | ||
360 | goto again; | ||
361 | } | ||
362 | |||
363 | if (static_branch_unlikely(&vgic_v2_cpuif_trap) && | 353 | if (static_branch_unlikely(&vgic_v2_cpuif_trap) && |
364 | exit_code == ARM_EXCEPTION_TRAP) { | 354 | exit_code == ARM_EXCEPTION_TRAP) { |
365 | bool valid; | 355 | bool valid; |
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index 3d69a8d41fa5..21ba0b29621b 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S | |||
@@ -21,7 +21,7 @@ | |||
21 | 21 | ||
22 | .text | 22 | .text |
23 | 23 | ||
24 | /* Prototype: int __clear_user(void *addr, size_t sz) | 24 | /* Prototype: int __arch_clear_user(void *addr, size_t sz) |
25 | * Purpose : clear some user memory | 25 | * Purpose : clear some user memory |
26 | * Params : addr - user memory address to clear | 26 | * Params : addr - user memory address to clear |
27 | * : sz - number of bytes to clear | 27 | * : sz - number of bytes to clear |
@@ -29,7 +29,7 @@ | |||
29 | * | 29 | * |
30 | * Alignment fixed up by hardware. | 30 | * Alignment fixed up by hardware. |
31 | */ | 31 | */ |
32 | ENTRY(__clear_user) | 32 | ENTRY(__arch_clear_user) |
33 | uaccess_enable_not_uao x2, x3, x4 | 33 | uaccess_enable_not_uao x2, x3, x4 |
34 | mov x2, x1 // save the size for fixup return | 34 | mov x2, x1 // save the size for fixup return |
35 | subs x1, x1, #8 | 35 | subs x1, x1, #8 |
@@ -52,7 +52,7 @@ uao_user_alternative 9f, strb, sttrb, wzr, x0, 0 | |||
52 | 5: mov x0, #0 | 52 | 5: mov x0, #0 |
53 | uaccess_disable_not_uao x2, x3 | 53 | uaccess_disable_not_uao x2, x3 |
54 | ret | 54 | ret |
55 | ENDPROC(__clear_user) | 55 | ENDPROC(__arch_clear_user) |
56 | 56 | ||
57 | .section .fixup,"ax" | 57 | .section .fixup,"ax" |
58 | .align 2 | 58 | .align 2 |
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index fbb090f431a5..54b75deb1d16 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S | |||
@@ -64,14 +64,15 @@ | |||
64 | .endm | 64 | .endm |
65 | 65 | ||
66 | end .req x5 | 66 | end .req x5 |
67 | ENTRY(raw_copy_in_user) | 67 | |
68 | ENTRY(__arch_copy_in_user) | ||
68 | uaccess_enable_not_uao x3, x4, x5 | 69 | uaccess_enable_not_uao x3, x4, x5 |
69 | add end, x0, x2 | 70 | add end, x0, x2 |
70 | #include "copy_template.S" | 71 | #include "copy_template.S" |
71 | uaccess_disable_not_uao x3, x4 | 72 | uaccess_disable_not_uao x3, x4 |
72 | mov x0, #0 | 73 | mov x0, #0 |
73 | ret | 74 | ret |
74 | ENDPROC(raw_copy_in_user) | 75 | ENDPROC(__arch_copy_in_user) |
75 | 76 | ||
76 | .section .fixup,"ax" | 77 | .section .fixup,"ax" |
77 | .align 2 | 78 | .align 2 |
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index ce441d29e7f6..f76bb2c3c943 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
@@ -240,7 +240,7 @@ static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs, | |||
240 | if (fsc_type == ESR_ELx_FSC_PERM) | 240 | if (fsc_type == ESR_ELx_FSC_PERM) |
241 | return true; | 241 | return true; |
242 | 242 | ||
243 | if (addr < USER_DS && system_uses_ttbr0_pan()) | 243 | if (addr < TASK_SIZE && system_uses_ttbr0_pan()) |
244 | return fsc_type == ESR_ELx_FSC_FAULT && | 244 | return fsc_type == ESR_ELx_FSC_FAULT && |
245 | (regs->pstate & PSR_PAN_BIT); | 245 | (regs->pstate & PSR_PAN_BIT); |
246 | 246 | ||
@@ -414,7 +414,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, | |||
414 | mm_flags |= FAULT_FLAG_WRITE; | 414 | mm_flags |= FAULT_FLAG_WRITE; |
415 | } | 415 | } |
416 | 416 | ||
417 | if (addr < USER_DS && is_permission_fault(esr, regs, addr)) { | 417 | if (addr < TASK_SIZE && is_permission_fault(esr, regs, addr)) { |
418 | /* regs->orig_addr_limit may be 0 if we entered from EL0 */ | 418 | /* regs->orig_addr_limit may be 0 if we entered from EL0 */ |
419 | if (regs->orig_addr_limit == KERNEL_DS) | 419 | if (regs->orig_addr_limit == KERNEL_DS) |
420 | die("Accessing user space memory with fs=KERNEL_DS", regs, esr); | 420 | die("Accessing user space memory with fs=KERNEL_DS", regs, esr); |
@@ -707,6 +707,12 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr, | |||
707 | arm64_notify_die("", regs, &info, esr); | 707 | arm64_notify_die("", regs, &info, esr); |
708 | } | 708 | } |
709 | 709 | ||
710 | asmlinkage void __exception do_el0_irq_bp_hardening(void) | ||
711 | { | ||
712 | /* PC has already been checked in entry.S */ | ||
713 | arm64_apply_bp_hardening(); | ||
714 | } | ||
715 | |||
710 | asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr, | 716 | asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr, |
711 | unsigned int esr, | 717 | unsigned int esr, |
712 | struct pt_regs *regs) | 718 | struct pt_regs *regs) |
@@ -731,6 +737,12 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr, | |||
731 | struct siginfo info; | 737 | struct siginfo info; |
732 | struct task_struct *tsk = current; | 738 | struct task_struct *tsk = current; |
733 | 739 | ||
740 | if (user_mode(regs)) { | ||
741 | if (instruction_pointer(regs) > TASK_SIZE) | ||
742 | arm64_apply_bp_hardening(); | ||
743 | local_irq_enable(); | ||
744 | } | ||
745 | |||
734 | if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS)) | 746 | if (show_unhandled_signals && unhandled_signal(tsk, SIGBUS)) |
735 | pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n", | 747 | pr_info_ratelimited("%s[%d]: %s exception: pc=%p sp=%p\n", |
736 | tsk->comm, task_pid_nr(tsk), | 748 | tsk->comm, task_pid_nr(tsk), |
@@ -790,6 +802,9 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, | |||
790 | if (interrupts_enabled(regs)) | 802 | if (interrupts_enabled(regs)) |
791 | trace_hardirqs_off(); | 803 | trace_hardirqs_off(); |
792 | 804 | ||
805 | if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE) | ||
806 | arm64_apply_bp_hardening(); | ||
807 | |||
793 | if (!inf->fn(addr, esr, regs)) { | 808 | if (!inf->fn(addr, esr, regs)) { |
794 | rv = 1; | 809 | rv = 1; |
795 | } else { | 810 | } else { |
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 4e369dfb83b1..4694cda823c9 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -118,6 +118,10 @@ static bool pgattr_change_is_safe(u64 old, u64 new) | |||
118 | if ((old | new) & PTE_CONT) | 118 | if ((old | new) & PTE_CONT) |
119 | return false; | 119 | return false; |
120 | 120 | ||
121 | /* Transitioning from Global to Non-Global is safe */ | ||
122 | if (((old ^ new) == PTE_NG) && (new & PTE_NG)) | ||
123 | return true; | ||
124 | |||
121 | return ((old ^ new) & ~mask) == 0; | 125 | return ((old ^ new) & ~mask) == 0; |
122 | } | 126 | } |
123 | 127 | ||
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 9f177aac6390..71baed7e592a 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S | |||
@@ -90,7 +90,7 @@ ENDPROC(cpu_do_suspend) | |||
90 | * | 90 | * |
91 | * x0: Address of context pointer | 91 | * x0: Address of context pointer |
92 | */ | 92 | */ |
93 | .pushsection ".idmap.text", "ax" | 93 | .pushsection ".idmap.text", "awx" |
94 | ENTRY(cpu_do_resume) | 94 | ENTRY(cpu_do_resume) |
95 | ldp x2, x3, [x0] | 95 | ldp x2, x3, [x0] |
96 | ldp x4, x5, [x0, #16] | 96 | ldp x4, x5, [x0, #16] |
@@ -153,7 +153,7 @@ ENDPROC(cpu_do_resume) | |||
153 | ENTRY(cpu_do_switch_mm) | 153 | ENTRY(cpu_do_switch_mm) |
154 | mrs x2, ttbr1_el1 | 154 | mrs x2, ttbr1_el1 |
155 | mmid x1, x1 // get mm->context.id | 155 | mmid x1, x1 // get mm->context.id |
156 | phys_to_ttbr x0, x3 | 156 | phys_to_ttbr x3, x0 |
157 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN | 157 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN |
158 | bfi x3, x1, #48, #16 // set the ASID field in TTBR0 | 158 | bfi x3, x1, #48, #16 // set the ASID field in TTBR0 |
159 | #endif | 159 | #endif |
@@ -165,7 +165,18 @@ ENTRY(cpu_do_switch_mm) | |||
165 | b post_ttbr_update_workaround // Back to C code... | 165 | b post_ttbr_update_workaround // Back to C code... |
166 | ENDPROC(cpu_do_switch_mm) | 166 | ENDPROC(cpu_do_switch_mm) |
167 | 167 | ||
168 | .pushsection ".idmap.text", "ax" | 168 | .pushsection ".idmap.text", "awx" |
169 | |||
170 | .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 | ||
171 | adrp \tmp1, empty_zero_page | ||
172 | phys_to_ttbr \tmp2, \tmp1 | ||
173 | msr ttbr1_el1, \tmp2 | ||
174 | isb | ||
175 | tlbi vmalle1 | ||
176 | dsb nsh | ||
177 | isb | ||
178 | .endm | ||
179 | |||
169 | /* | 180 | /* |
170 | * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd) | 181 | * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd) |
171 | * | 182 | * |
@@ -175,24 +186,201 @@ ENDPROC(cpu_do_switch_mm) | |||
175 | ENTRY(idmap_cpu_replace_ttbr1) | 186 | ENTRY(idmap_cpu_replace_ttbr1) |
176 | save_and_disable_daif flags=x2 | 187 | save_and_disable_daif flags=x2 |
177 | 188 | ||
178 | adrp x1, empty_zero_page | 189 | __idmap_cpu_set_reserved_ttbr1 x1, x3 |
179 | phys_to_ttbr x1, x3 | 190 | |
191 | phys_to_ttbr x3, x0 | ||
180 | msr ttbr1_el1, x3 | 192 | msr ttbr1_el1, x3 |
181 | isb | 193 | isb |
182 | 194 | ||
183 | tlbi vmalle1 | 195 | restore_daif x2 |
184 | dsb nsh | 196 | |
197 | ret | ||
198 | ENDPROC(idmap_cpu_replace_ttbr1) | ||
199 | .popsection | ||
200 | |||
201 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 | ||
202 | .pushsection ".idmap.text", "awx" | ||
203 | |||
204 | .macro __idmap_kpti_get_pgtable_ent, type | ||
205 | dc cvac, cur_\()\type\()p // Ensure any existing dirty | ||
206 | dmb sy // lines are written back before | ||
207 | ldr \type, [cur_\()\type\()p] // loading the entry | ||
208 | tbz \type, #0, next_\()\type // Skip invalid entries | ||
209 | .endm | ||
210 | |||
211 | .macro __idmap_kpti_put_pgtable_ent_ng, type | ||
212 | orr \type, \type, #PTE_NG // Same bit for blocks and pages | ||
213 | str \type, [cur_\()\type\()p] // Update the entry and ensure it | ||
214 | dc civac, cur_\()\type\()p // is visible to all CPUs. | ||
215 | .endm | ||
216 | |||
217 | /* | ||
218 | * void idmap_kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper) | ||
219 | * | ||
220 | * Called exactly once from stop_machine context by each CPU found during boot. | ||
221 | */ | ||
222 | __idmap_kpti_flag: | ||
223 | .long 1 | ||
224 | ENTRY(idmap_kpti_install_ng_mappings) | ||
225 | cpu .req w0 | ||
226 | num_cpus .req w1 | ||
227 | swapper_pa .req x2 | ||
228 | swapper_ttb .req x3 | ||
229 | flag_ptr .req x4 | ||
230 | cur_pgdp .req x5 | ||
231 | end_pgdp .req x6 | ||
232 | pgd .req x7 | ||
233 | cur_pudp .req x8 | ||
234 | end_pudp .req x9 | ||
235 | pud .req x10 | ||
236 | cur_pmdp .req x11 | ||
237 | end_pmdp .req x12 | ||
238 | pmd .req x13 | ||
239 | cur_ptep .req x14 | ||
240 | end_ptep .req x15 | ||
241 | pte .req x16 | ||
242 | |||
243 | mrs swapper_ttb, ttbr1_el1 | ||
244 | adr flag_ptr, __idmap_kpti_flag | ||
245 | |||
246 | cbnz cpu, __idmap_kpti_secondary | ||
247 | |||
248 | /* We're the boot CPU. Wait for the others to catch up */ | ||
249 | sevl | ||
250 | 1: wfe | ||
251 | ldaxr w18, [flag_ptr] | ||
252 | eor w18, w18, num_cpus | ||
253 | cbnz w18, 1b | ||
254 | |||
255 | /* We need to walk swapper, so turn off the MMU. */ | ||
256 | pre_disable_mmu_workaround | ||
257 | mrs x18, sctlr_el1 | ||
258 | bic x18, x18, #SCTLR_ELx_M | ||
259 | msr sctlr_el1, x18 | ||
185 | isb | 260 | isb |
186 | 261 | ||
187 | phys_to_ttbr x0, x3 | 262 | /* Everybody is enjoying the idmap, so we can rewrite swapper. */ |
188 | msr ttbr1_el1, x3 | 263 | /* PGD */ |
264 | mov cur_pgdp, swapper_pa | ||
265 | add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) | ||
266 | do_pgd: __idmap_kpti_get_pgtable_ent pgd | ||
267 | tbnz pgd, #1, walk_puds | ||
268 | __idmap_kpti_put_pgtable_ent_ng pgd | ||
269 | next_pgd: | ||
270 | add cur_pgdp, cur_pgdp, #8 | ||
271 | cmp cur_pgdp, end_pgdp | ||
272 | b.ne do_pgd | ||
273 | |||
274 | /* Publish the updated tables and nuke all the TLBs */ | ||
275 | dsb sy | ||
276 | tlbi vmalle1is | ||
277 | dsb ish | ||
189 | isb | 278 | isb |
190 | 279 | ||
191 | restore_daif x2 | 280 | /* We're done: fire up the MMU again */ |
281 | mrs x18, sctlr_el1 | ||
282 | orr x18, x18, #SCTLR_ELx_M | ||
283 | msr sctlr_el1, x18 | ||
284 | isb | ||
192 | 285 | ||
286 | /* Set the flag to zero to indicate that we're all done */ | ||
287 | str wzr, [flag_ptr] | ||
193 | ret | 288 | ret |
194 | ENDPROC(idmap_cpu_replace_ttbr1) | 289 | |
290 | /* PUD */ | ||
291 | walk_puds: | ||
292 | .if CONFIG_PGTABLE_LEVELS > 3 | ||
293 | pte_to_phys cur_pudp, pgd | ||
294 | add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) | ||
295 | do_pud: __idmap_kpti_get_pgtable_ent pud | ||
296 | tbnz pud, #1, walk_pmds | ||
297 | __idmap_kpti_put_pgtable_ent_ng pud | ||
298 | next_pud: | ||
299 | add cur_pudp, cur_pudp, 8 | ||
300 | cmp cur_pudp, end_pudp | ||
301 | b.ne do_pud | ||
302 | b next_pgd | ||
303 | .else /* CONFIG_PGTABLE_LEVELS <= 3 */ | ||
304 | mov pud, pgd | ||
305 | b walk_pmds | ||
306 | next_pud: | ||
307 | b next_pgd | ||
308 | .endif | ||
309 | |||
310 | /* PMD */ | ||
311 | walk_pmds: | ||
312 | .if CONFIG_PGTABLE_LEVELS > 2 | ||
313 | pte_to_phys cur_pmdp, pud | ||
314 | add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) | ||
315 | do_pmd: __idmap_kpti_get_pgtable_ent pmd | ||
316 | tbnz pmd, #1, walk_ptes | ||
317 | __idmap_kpti_put_pgtable_ent_ng pmd | ||
318 | next_pmd: | ||
319 | add cur_pmdp, cur_pmdp, #8 | ||
320 | cmp cur_pmdp, end_pmdp | ||
321 | b.ne do_pmd | ||
322 | b next_pud | ||
323 | .else /* CONFIG_PGTABLE_LEVELS <= 2 */ | ||
324 | mov pmd, pud | ||
325 | b walk_ptes | ||
326 | next_pmd: | ||
327 | b next_pud | ||
328 | .endif | ||
329 | |||
330 | /* PTE */ | ||
331 | walk_ptes: | ||
332 | pte_to_phys cur_ptep, pmd | ||
333 | add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) | ||
334 | do_pte: __idmap_kpti_get_pgtable_ent pte | ||
335 | __idmap_kpti_put_pgtable_ent_ng pte | ||
336 | next_pte: | ||
337 | add cur_ptep, cur_ptep, #8 | ||
338 | cmp cur_ptep, end_ptep | ||
339 | b.ne do_pte | ||
340 | b next_pmd | ||
341 | |||
342 | /* Secondary CPUs end up here */ | ||
343 | __idmap_kpti_secondary: | ||
344 | /* Uninstall swapper before surgery begins */ | ||
345 | __idmap_cpu_set_reserved_ttbr1 x18, x17 | ||
346 | |||
347 | /* Increment the flag to let the boot CPU know we're ready */ | ||
348 | 1: ldxr w18, [flag_ptr] | ||
349 | add w18, w18, #1 | ||
350 | stxr w17, w18, [flag_ptr] | ||
351 | cbnz w17, 1b | ||
352 | |||
353 | /* Wait for the boot CPU to finish messing around with swapper */ | ||
354 | sevl | ||
355 | 1: wfe | ||
356 | ldxr w18, [flag_ptr] | ||
357 | cbnz w18, 1b | ||
358 | |||
359 | /* All done, act like nothing happened */ | ||
360 | msr ttbr1_el1, swapper_ttb | ||
361 | isb | ||
362 | ret | ||
363 | |||
364 | .unreq cpu | ||
365 | .unreq num_cpus | ||
366 | .unreq swapper_pa | ||
367 | .unreq swapper_ttb | ||
368 | .unreq flag_ptr | ||
369 | .unreq cur_pgdp | ||
370 | .unreq end_pgdp | ||
371 | .unreq pgd | ||
372 | .unreq cur_pudp | ||
373 | .unreq end_pudp | ||
374 | .unreq pud | ||
375 | .unreq cur_pmdp | ||
376 | .unreq end_pmdp | ||
377 | .unreq pmd | ||
378 | .unreq cur_ptep | ||
379 | .unreq end_ptep | ||
380 | .unreq pte | ||
381 | ENDPROC(idmap_kpti_install_ng_mappings) | ||
195 | .popsection | 382 | .popsection |
383 | #endif | ||
196 | 384 | ||
197 | /* | 385 | /* |
198 | * __cpu_setup | 386 | * __cpu_setup |
@@ -200,7 +388,7 @@ ENDPROC(idmap_cpu_replace_ttbr1) | |||
200 | * Initialise the processor for turning the MMU on. Return in x0 the | 388 | * Initialise the processor for turning the MMU on. Return in x0 the |
201 | * value of the SCTLR_EL1 register. | 389 | * value of the SCTLR_EL1 register. |
202 | */ | 390 | */ |
203 | .pushsection ".idmap.text", "ax" | 391 | .pushsection ".idmap.text", "awx" |
204 | ENTRY(__cpu_setup) | 392 | ENTRY(__cpu_setup) |
205 | tlbi vmalle1 // Invalidate local TLB | 393 | tlbi vmalle1 // Invalidate local TLB |
206 | dsb nsh | 394 | dsb nsh |
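idmap_kpti_install_ng_mappings() above runs with the MMU off on the boot CPU while the secondaries sit on the reserved TTBR1, so it has to be entered through the idmap from stop_machine() context. A rough sketch of a caller, assuming the existing idmap helpers (the wrapper function and its flow are illustrative, not part of this diff):

    #include <linux/smp.h>
    #include <linux/stop_machine.h>
    #include <asm/mmu_context.h>

    typedef void (kpti_remap_fn)(int, int, phys_addr_t);
    extern kpti_remap_fn idmap_kpti_install_ng_mappings;

    /* Sketch: rewrite swapper with nG mappings; runs once per online CPU. */
    static int kpti_remap_swapper(void *unused)
    {
            kpti_remap_fn *remap_fn =
                    (void *)__pa_symbol(idmap_kpti_install_ng_mappings);

            cpu_install_idmap();            /* TTBR0 covers the idmap */
            remap_fn(smp_processor_id(), num_online_cpus(),
                     __pa_symbol(swapper_pg_dir));
            cpu_uninstall_idmap();

            return 0;
    }

    /* e.g. stop_machine(kpti_remap_swapper, NULL, cpu_online_mask); */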
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c index 8b25d31e8401..c80ec1d03274 100644 --- a/drivers/firmware/psci.c +++ b/drivers/firmware/psci.c | |||
@@ -59,7 +59,10 @@ bool psci_tos_resident_on(int cpu) | |||
59 | return cpu == resident_cpu; | 59 | return cpu == resident_cpu; |
60 | } | 60 | } |
61 | 61 | ||
62 | struct psci_operations psci_ops; | 62 | struct psci_operations psci_ops = { |
63 | .conduit = PSCI_CONDUIT_NONE, | ||
64 | .smccc_version = SMCCC_VERSION_1_0, | ||
65 | }; | ||
63 | 66 | ||
64 | typedef unsigned long (psci_fn)(unsigned long, unsigned long, | 67 | typedef unsigned long (psci_fn)(unsigned long, unsigned long, |
65 | unsigned long, unsigned long); | 68 | unsigned long, unsigned long); |
@@ -210,6 +213,22 @@ static unsigned long psci_migrate_info_up_cpu(void) | |||
210 | 0, 0, 0); | 213 | 0, 0, 0); |
211 | } | 214 | } |
212 | 215 | ||
216 | static void set_conduit(enum psci_conduit conduit) | ||
217 | { | ||
218 | switch (conduit) { | ||
219 | case PSCI_CONDUIT_HVC: | ||
220 | invoke_psci_fn = __invoke_psci_fn_hvc; | ||
221 | break; | ||
222 | case PSCI_CONDUIT_SMC: | ||
223 | invoke_psci_fn = __invoke_psci_fn_smc; | ||
224 | break; | ||
225 | default: | ||
226 | WARN(1, "Unexpected PSCI conduit %d\n", conduit); | ||
227 | } | ||
228 | |||
229 | psci_ops.conduit = conduit; | ||
230 | } | ||
231 | |||
213 | static int get_set_conduit_method(struct device_node *np) | 232 | static int get_set_conduit_method(struct device_node *np) |
214 | { | 233 | { |
215 | const char *method; | 234 | const char *method; |
@@ -222,9 +241,9 @@ static int get_set_conduit_method(struct device_node *np) | |||
222 | } | 241 | } |
223 | 242 | ||
224 | if (!strcmp("hvc", method)) { | 243 | if (!strcmp("hvc", method)) { |
225 | invoke_psci_fn = __invoke_psci_fn_hvc; | 244 | set_conduit(PSCI_CONDUIT_HVC); |
226 | } else if (!strcmp("smc", method)) { | 245 | } else if (!strcmp("smc", method)) { |
227 | invoke_psci_fn = __invoke_psci_fn_smc; | 246 | set_conduit(PSCI_CONDUIT_SMC); |
228 | } else { | 247 | } else { |
229 | pr_warn("invalid \"method\" property: %s\n", method); | 248 | pr_warn("invalid \"method\" property: %s\n", method); |
230 | return -EINVAL; | 249 | return -EINVAL; |
@@ -493,6 +512,31 @@ static void __init psci_init_migrate(void) | |||
493 | pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid); | 512 | pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid); |
494 | } | 513 | } |
495 | 514 | ||
515 | static void __init psci_init_smccc(void) | ||
516 | { | ||
517 | u32 ver = ARM_SMCCC_VERSION_1_0; | ||
518 | int feature; | ||
519 | |||
520 | feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID); | ||
521 | |||
522 | if (feature != PSCI_RET_NOT_SUPPORTED) { | ||
523 | u32 ret; | ||
524 | ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0); | ||
525 | if (ret == ARM_SMCCC_VERSION_1_1) { | ||
526 | psci_ops.smccc_version = SMCCC_VERSION_1_1; | ||
527 | ver = ret; | ||
528 | } | ||
529 | } | ||
530 | |||
531 | /* | ||
532 | * Conveniently, the SMCCC and PSCI versions are encoded the | ||
533 | * same way. No, this isn't accidental. | ||
534 | */ | ||
535 | pr_info("SMC Calling Convention v%d.%d\n", | ||
536 | PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver)); | ||
537 | |||
538 | } | ||
539 | |||
496 | static void __init psci_0_2_set_functions(void) | 540 | static void __init psci_0_2_set_functions(void) |
497 | { | 541 | { |
498 | pr_info("Using standard PSCI v0.2 function IDs\n"); | 542 | pr_info("Using standard PSCI v0.2 function IDs\n"); |
@@ -541,6 +585,7 @@ static int __init psci_probe(void) | |||
541 | psci_init_migrate(); | 585 | psci_init_migrate(); |
542 | 586 | ||
543 | if (PSCI_VERSION_MAJOR(ver) >= 1) { | 587 | if (PSCI_VERSION_MAJOR(ver) >= 1) { |
588 | psci_init_smccc(); | ||
544 | psci_init_cpu_suspend(); | 589 | psci_init_cpu_suspend(); |
545 | psci_init_system_suspend(); | 590 | psci_init_system_suspend(); |
546 | } | 591 | } |
@@ -654,9 +699,9 @@ int __init psci_acpi_init(void) | |||
654 | pr_info("probing for conduit method from ACPI.\n"); | 699 | pr_info("probing for conduit method from ACPI.\n"); |
655 | 700 | ||
656 | if (acpi_psci_use_hvc()) | 701 | if (acpi_psci_use_hvc()) |
657 | invoke_psci_fn = __invoke_psci_fn_hvc; | 702 | set_conduit(PSCI_CONDUIT_HVC); |
658 | else | 703 | else |
659 | invoke_psci_fn = __invoke_psci_fn_smc; | 704 | set_conduit(PSCI_CONDUIT_SMC); |
660 | 705 | ||
661 | return psci_probe(); | 706 | return psci_probe(); |
662 | } | 707 | } |
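With set_conduit() and psci_init_smccc() recording the conduit and SMCCC version in psci_ops, other code can decide whether SMCCC v1.1 fast calls are usable without re-probing the firmware. A minimal sketch of such a check (the helper name is illustrative):

    #include <linux/psci.h>

    /* Sketch: report whether SMCCC v1.1 calls can be made, and via which conduit. */
    static bool smccc_v1_1_usable(enum psci_conduit *conduit)
    {
            if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
                    return false;
            if (psci_ops.conduit == PSCI_CONDUIT_NONE)
                    return false;           /* no PSCI firmware at all */

            *conduit = psci_ops.conduit;    /* PSCI_CONDUIT_HVC or PSCI_CONDUIT_SMC */
            return true;
    }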
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h new file mode 100644 index 000000000000..e518e4e3dfb5 --- /dev/null +++ b/include/kvm/arm_psci.h | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012,2013 - ARM Ltd | ||
3 | * Author: Marc Zyngier <marc.zyngier@arm.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #ifndef __KVM_ARM_PSCI_H__ | ||
19 | #define __KVM_ARM_PSCI_H__ | ||
20 | |||
21 | #include <linux/kvm_host.h> | ||
22 | #include <uapi/linux/psci.h> | ||
23 | |||
24 | #define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1) | ||
25 | #define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) | ||
26 | #define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0) | ||
27 | |||
28 | #define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0 | ||
29 | |||
30 | /* | ||
31 | * We need the KVM pointer independently from the vcpu as we can call | ||
32 | * this from HYP, and need to apply kern_hyp_va on it... | ||
33 | */ | ||
34 | static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm) | ||
35 | { | ||
36 | /* | ||
37 | * Our PSCI implementation stays the same across versions from | ||
38 | * v0.2 onward, only adding the few mandatory functions (such | ||
39 | * as FEATURES with 1.0) that are required by newer | ||
40 | * revisions. It is thus safe to return the latest. | ||
41 | */ | ||
42 | if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) | ||
43 | return KVM_ARM_PSCI_LATEST; | ||
44 | |||
45 | return KVM_ARM_PSCI_0_1; | ||
46 | } | ||
47 | |||
48 | |||
49 | int kvm_hvc_call_handler(struct kvm_vcpu *vcpu); | ||
50 | |||
51 | #endif /* __KVM_ARM_PSCI_H__ */ | ||
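Note that the body above only reads vcpu; the separate kvm argument exists so an EL2 caller can pass in a pointer it has already translated with kern_hyp_va(). A hypothetical HYP-side use, purely illustrative:

    #include <kvm/arm_psci.h>
    #include <asm/kvm_mmu.h>        /* kern_hyp_va() */

    /* Hypothetical EL2-side check; the version constants compare numerically. */
    static bool vcpu_has_psci_0_2_or_later(struct kvm_vcpu *vcpu)
    {
            struct kvm *kvm = kern_hyp_va(vcpu->kvm);

            return kvm_psci_version(vcpu, kvm) >= KVM_ARM_PSCI_0_2;
    }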
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 4c5bca38c653..a031897fca76 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h | |||
@@ -14,14 +14,16 @@ | |||
14 | #ifndef __LINUX_ARM_SMCCC_H | 14 | #ifndef __LINUX_ARM_SMCCC_H |
15 | #define __LINUX_ARM_SMCCC_H | 15 | #define __LINUX_ARM_SMCCC_H |
16 | 16 | ||
17 | #include <uapi/linux/const.h> | ||
18 | |||
17 | /* | 19 | /* |
18 | * This file provides common defines for ARM SMC Calling Convention as | 20 | * This file provides common defines for ARM SMC Calling Convention as |
19 | * specified in | 21 | * specified in |
20 | * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html | 22 | * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html |
21 | */ | 23 | */ |
22 | 24 | ||
23 | #define ARM_SMCCC_STD_CALL 0 | 25 | #define ARM_SMCCC_STD_CALL _AC(0,U) |
24 | #define ARM_SMCCC_FAST_CALL 1 | 26 | #define ARM_SMCCC_FAST_CALL _AC(1,U) |
25 | #define ARM_SMCCC_TYPE_SHIFT 31 | 27 | #define ARM_SMCCC_TYPE_SHIFT 31 |
26 | 28 | ||
27 | #define ARM_SMCCC_SMC_32 0 | 29 | #define ARM_SMCCC_SMC_32 0 |
@@ -60,6 +62,24 @@ | |||
60 | #define ARM_SMCCC_QUIRK_NONE 0 | 62 | #define ARM_SMCCC_QUIRK_NONE 0 |
61 | #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ | 63 | #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ |
62 | 64 | ||
65 | #define ARM_SMCCC_VERSION_1_0 0x10000 | ||
66 | #define ARM_SMCCC_VERSION_1_1 0x10001 | ||
67 | |||
68 | #define ARM_SMCCC_VERSION_FUNC_ID \ | ||
69 | ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ | ||
70 | ARM_SMCCC_SMC_32, \ | ||
71 | 0, 0) | ||
72 | |||
73 | #define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \ | ||
74 | ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ | ||
75 | ARM_SMCCC_SMC_32, \ | ||
76 | 0, 1) | ||
77 | |||
78 | #define ARM_SMCCC_ARCH_WORKAROUND_1 \ | ||
79 | ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ | ||
80 | ARM_SMCCC_SMC_32, \ | ||
81 | 0, 0x8000) | ||
82 | |||
63 | #ifndef __ASSEMBLY__ | 83 | #ifndef __ASSEMBLY__ |
64 | 84 | ||
65 | #include <linux/linkage.h> | 85 | #include <linux/linkage.h> |
@@ -130,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, | |||
130 | 150 | ||
131 | #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__) | 151 | #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__) |
132 | 152 | ||
153 | /* SMCCC v1.1 implementation madness follows */ | ||
154 | #ifdef CONFIG_ARM64 | ||
155 | |||
156 | #define SMCCC_SMC_INST "smc #0" | ||
157 | #define SMCCC_HVC_INST "hvc #0" | ||
158 | |||
159 | #elif defined(CONFIG_ARM) | ||
160 | #include <asm/opcodes-sec.h> | ||
161 | #include <asm/opcodes-virt.h> | ||
162 | |||
163 | #define SMCCC_SMC_INST __SMC(0) | ||
164 | #define SMCCC_HVC_INST __HVC(0) | ||
165 | |||
166 | #endif | ||
167 | |||
168 | #define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x | ||
169 | |||
170 | #define __count_args(...) \ | ||
171 | ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) | ||
172 | |||
173 | #define __constraint_write_0 \ | ||
174 | "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3) | ||
175 | #define __constraint_write_1 \ | ||
176 | "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3) | ||
177 | #define __constraint_write_2 \ | ||
178 | "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3) | ||
179 | #define __constraint_write_3 \ | ||
180 | "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) | ||
181 | #define __constraint_write_4 __constraint_write_3 | ||
182 | #define __constraint_write_5 __constraint_write_4 | ||
183 | #define __constraint_write_6 __constraint_write_5 | ||
184 | #define __constraint_write_7 __constraint_write_6 | ||
185 | |||
186 | #define __constraint_read_0 | ||
187 | #define __constraint_read_1 | ||
188 | #define __constraint_read_2 | ||
189 | #define __constraint_read_3 | ||
190 | #define __constraint_read_4 "r" (r4) | ||
191 | #define __constraint_read_5 __constraint_read_4, "r" (r5) | ||
192 | #define __constraint_read_6 __constraint_read_5, "r" (r6) | ||
193 | #define __constraint_read_7 __constraint_read_6, "r" (r7) | ||
194 | |||
195 | #define __declare_arg_0(a0, res) \ | ||
196 | struct arm_smccc_res *___res = res; \ | ||
197 | register u32 r0 asm("r0") = a0; \ | ||
198 | register unsigned long r1 asm("r1"); \ | ||
199 | register unsigned long r2 asm("r2"); \ | ||
200 | register unsigned long r3 asm("r3") | ||
201 | |||
202 | #define __declare_arg_1(a0, a1, res) \ | ||
203 | struct arm_smccc_res *___res = res; \ | ||
204 | register u32 r0 asm("r0") = a0; \ | ||
205 | register typeof(a1) r1 asm("r1") = a1; \ | ||
206 | register unsigned long r2 asm("r2"); \ | ||
207 | register unsigned long r3 asm("r3") | ||
208 | |||
209 | #define __declare_arg_2(a0, a1, a2, res) \ | ||
210 | struct arm_smccc_res *___res = res; \ | ||
211 | register u32 r0 asm("r0") = a0; \ | ||
212 | register typeof(a1) r1 asm("r1") = a1; \ | ||
213 | register typeof(a2) r2 asm("r2") = a2; \ | ||
214 | register unsigned long r3 asm("r3") | ||
215 | |||
216 | #define __declare_arg_3(a0, a1, a2, a3, res) \ | ||
217 | struct arm_smccc_res *___res = res; \ | ||
218 | register u32 r0 asm("r0") = a0; \ | ||
219 | register typeof(a1) r1 asm("r1") = a1; \ | ||
220 | register typeof(a2) r2 asm("r2") = a2; \ | ||
221 | register typeof(a3) r3 asm("r3") = a3 | ||
222 | |||
223 | #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ | ||
224 | __declare_arg_3(a0, a1, a2, a3, res); \ | ||
225 | register typeof(a4) r4 asm("r4") = a4 | ||
226 | |||
227 | #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ | ||
228 | __declare_arg_4(a0, a1, a2, a3, a4, res); \ | ||
229 | register typeof(a5) r5 asm("r5") = a5 | ||
230 | |||
231 | #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ | ||
232 | __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ | ||
233 | register typeof(a6) r6 asm("r6") = a6 | ||
234 | |||
235 | #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ | ||
236 | __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ | ||
237 | register typeof(a7) r7 asm("r7") = a7 | ||
238 | |||
239 | #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) | ||
240 | #define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) | ||
241 | |||
242 | #define ___constraints(count) \ | ||
243 | : __constraint_write_ ## count \ | ||
244 | : __constraint_read_ ## count \ | ||
245 | : "memory" | ||
246 | #define __constraints(count) ___constraints(count) | ||
247 | |||
248 | /* | ||
249 | * We have an output list that is not necessarily used, and GCC feels | ||
250 | * entitled to optimise the whole sequence away. "volatile" is what | ||
251 | * makes it stick. | ||
252 | */ | ||
253 | #define __arm_smccc_1_1(inst, ...) \ | ||
254 | do { \ | ||
255 | __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ | ||
256 | asm volatile(inst "\n" \ | ||
257 | __constraints(__count_args(__VA_ARGS__))); \ | ||
258 | if (___res) \ | ||
259 | *___res = (typeof(*___res)){r0, r1, r2, r3}; \ | ||
260 | } while (0) | ||
261 | |||
262 | /* | ||
263 | * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call | ||
264 | * | ||
265 | * This is a variadic macro taking one to eight source arguments, and | ||
266 | * an optional return structure. | ||
267 | * | ||
268 | * @a0-a7: arguments passed in registers 0 to 7 | ||
269 | * @res: result values from registers 0 to 3 | ||
270 | * | ||
271 | * This macro is used to make SMC calls following SMC Calling Convention v1.1. | ||
272 | * The contents of the supplied parameters are copied to registers 0 to 7 prior | ||
273 | * to the SMC instruction. The return values are updated with the contents | ||
274 | * of registers 0 to 3 on return from the SMC instruction if res is not NULL. | ||
275 | */ | ||
276 | #define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__) | ||
277 | |||
278 | /* | ||
279 | * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call | ||
280 | * | ||
281 | * This is a variadic macro taking one to eight source arguments, and | ||
282 | * an optional return structure. | ||
283 | * | ||
284 | * @a0-a7: arguments passed in registers 0 to 7 | ||
285 | * @res: result values from registers 0 to 3 | ||
286 | * | ||
287 | * This macro is used to make HVC calls following SMC Calling Convention v1.1. | ||
288 | * The contents of the supplied parameters are copied to registers 0 to 7 prior | ||
289 | * to the HVC instruction. The return values are updated with the contents | ||
290 | * of registers 0 to 3 on return from the HVC instruction if res is not NULL. | ||
291 | */ | ||
292 | #define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__) | ||
293 | |||
133 | #endif /*__ASSEMBLY__*/ | 294 | #endif /*__ASSEMBLY__*/ |
134 | #endif /*__LINUX_ARM_SMCCC_H*/ | 295 | #endif /*__LINUX_ARM_SMCCC_H*/ |
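As a usage illustration of the new primitives (not part of this diff), querying ARM_SMCCC_ARCH_FEATURES_FUNC_ID becomes a single call with the results returned in a struct arm_smccc_res; the sketch below assumes the SMC conduit:

    #include <linux/arm-smccc.h>

    /*
     * Sketch: ask firmware whether ARM_SMCCC_ARCH_WORKAROUND_1 is
     * implemented. A negative value in res.a0 means it is not.
     */
    static bool firmware_has_arch_workaround_1(void)
    {
            struct arm_smccc_res res;

            arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                              ARM_SMCCC_ARCH_WORKAROUND_1, &res);

            return (int)res.a0 >= 0;
    }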
diff --git a/include/linux/psci.h b/include/linux/psci.h index f724fd8c78e8..8b1b3b5935ab 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h | |||
@@ -25,6 +25,17 @@ bool psci_tos_resident_on(int cpu); | |||
25 | int psci_cpu_init_idle(unsigned int cpu); | 25 | int psci_cpu_init_idle(unsigned int cpu); |
26 | int psci_cpu_suspend_enter(unsigned long index); | 26 | int psci_cpu_suspend_enter(unsigned long index); |
27 | 27 | ||
28 | enum psci_conduit { | ||
29 | PSCI_CONDUIT_NONE, | ||
30 | PSCI_CONDUIT_SMC, | ||
31 | PSCI_CONDUIT_HVC, | ||
32 | }; | ||
33 | |||
34 | enum smccc_version { | ||
35 | SMCCC_VERSION_1_0, | ||
36 | SMCCC_VERSION_1_1, | ||
37 | }; | ||
38 | |||
28 | struct psci_operations { | 39 | struct psci_operations { |
29 | u32 (*get_version)(void); | 40 | u32 (*get_version)(void); |
30 | int (*cpu_suspend)(u32 state, unsigned long entry_point); | 41 | int (*cpu_suspend)(u32 state, unsigned long entry_point); |
@@ -34,6 +45,8 @@ struct psci_operations { | |||
34 | int (*affinity_info)(unsigned long target_affinity, | 45 | int (*affinity_info)(unsigned long target_affinity, |
35 | unsigned long lowest_affinity_level); | 46 | unsigned long lowest_affinity_level); |
36 | int (*migrate_info_type)(void); | 47 | int (*migrate_info_type)(void); |
48 | enum psci_conduit conduit; | ||
49 | enum smccc_version smccc_version; | ||
37 | }; | 50 | }; |
38 | 51 | ||
39 | extern struct psci_operations psci_ops; | 52 | extern struct psci_operations psci_ops; |
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h index 760e52a9640f..b3bcabe380da 100644 --- a/include/uapi/linux/psci.h +++ b/include/uapi/linux/psci.h | |||
@@ -88,6 +88,9 @@ | |||
88 | (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT) | 88 | (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT) |
89 | #define PSCI_VERSION_MINOR(ver) \ | 89 | #define PSCI_VERSION_MINOR(ver) \ |
90 | ((ver) & PSCI_VERSION_MINOR_MASK) | 90 | ((ver) & PSCI_VERSION_MINOR_MASK) |
91 | #define PSCI_VERSION(maj, min) \ | ||
92 | ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \ | ||
93 | ((min) & PSCI_VERSION_MINOR_MASK)) | ||
91 | 94 | ||
92 | /* PSCI features decoding (>=1.0) */ | 95 | /* PSCI features decoding (>=1.0) */ |
93 | #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1 | 96 | #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1 |
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 08464b2fba1d..7e3941f2ecde 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/irqbypass.h> | 31 | #include <linux/irqbypass.h> |
32 | #include <trace/events/kvm.h> | 32 | #include <trace/events/kvm.h> |
33 | #include <kvm/arm_pmu.h> | 33 | #include <kvm/arm_pmu.h> |
34 | #include <kvm/arm_psci.h> | ||
34 | 35 | ||
35 | #define CREATE_TRACE_POINTS | 36 | #define CREATE_TRACE_POINTS |
36 | #include "trace.h" | 37 | #include "trace.h" |
@@ -46,7 +47,6 @@ | |||
46 | #include <asm/kvm_mmu.h> | 47 | #include <asm/kvm_mmu.h> |
47 | #include <asm/kvm_emulate.h> | 48 | #include <asm/kvm_emulate.h> |
48 | #include <asm/kvm_coproc.h> | 49 | #include <asm/kvm_coproc.h> |
49 | #include <asm/kvm_psci.h> | ||
50 | #include <asm/sections.h> | 50 | #include <asm/sections.h> |
51 | 51 | ||
52 | #ifdef REQUIRES_VIRT | 52 | #ifdef REQUIRES_VIRT |
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index f1e363bab5e8..6919352cbf15 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c | |||
@@ -15,16 +15,16 @@ | |||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/arm-smccc.h> | ||
18 | #include <linux/preempt.h> | 19 | #include <linux/preempt.h> |
19 | #include <linux/kvm_host.h> | 20 | #include <linux/kvm_host.h> |
20 | #include <linux/wait.h> | 21 | #include <linux/wait.h> |
21 | 22 | ||
22 | #include <asm/cputype.h> | 23 | #include <asm/cputype.h> |
23 | #include <asm/kvm_emulate.h> | 24 | #include <asm/kvm_emulate.h> |
24 | #include <asm/kvm_psci.h> | ||
25 | #include <asm/kvm_host.h> | 25 | #include <asm/kvm_host.h> |
26 | 26 | ||
27 | #include <uapi/linux/psci.h> | 27 | #include <kvm/arm_psci.h> |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * This is an implementation of the Power State Coordination Interface | 30 | * This is an implementation of the Power State Coordination Interface |
@@ -33,6 +33,38 @@ | |||
33 | 33 | ||
34 | #define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1) | 34 | #define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1) |
35 | 35 | ||
36 | static u32 smccc_get_function(struct kvm_vcpu *vcpu) | ||
37 | { | ||
38 | return vcpu_get_reg(vcpu, 0); | ||
39 | } | ||
40 | |||
41 | static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu) | ||
42 | { | ||
43 | return vcpu_get_reg(vcpu, 1); | ||
44 | } | ||
45 | |||
46 | static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu) | ||
47 | { | ||
48 | return vcpu_get_reg(vcpu, 2); | ||
49 | } | ||
50 | |||
51 | static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu) | ||
52 | { | ||
53 | return vcpu_get_reg(vcpu, 3); | ||
54 | } | ||
55 | |||
56 | static void smccc_set_retval(struct kvm_vcpu *vcpu, | ||
57 | unsigned long a0, | ||
58 | unsigned long a1, | ||
59 | unsigned long a2, | ||
60 | unsigned long a3) | ||
61 | { | ||
62 | vcpu_set_reg(vcpu, 0, a0); | ||
63 | vcpu_set_reg(vcpu, 1, a1); | ||
64 | vcpu_set_reg(vcpu, 2, a2); | ||
65 | vcpu_set_reg(vcpu, 3, a3); | ||
66 | } | ||
67 | |||
36 | static unsigned long psci_affinity_mask(unsigned long affinity_level) | 68 | static unsigned long psci_affinity_mask(unsigned long affinity_level) |
37 | { | 69 | { |
38 | if (affinity_level <= 3) | 70 | if (affinity_level <= 3) |
@@ -78,7 +110,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | |||
78 | unsigned long context_id; | 110 | unsigned long context_id; |
79 | phys_addr_t target_pc; | 111 | phys_addr_t target_pc; |
80 | 112 | ||
81 | cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK; | 113 | cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; |
82 | if (vcpu_mode_is_32bit(source_vcpu)) | 114 | if (vcpu_mode_is_32bit(source_vcpu)) |
83 | cpu_id &= ~((u32) 0); | 115 | cpu_id &= ~((u32) 0); |
84 | 116 | ||
@@ -91,14 +123,14 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | |||
91 | if (!vcpu) | 123 | if (!vcpu) |
92 | return PSCI_RET_INVALID_PARAMS; | 124 | return PSCI_RET_INVALID_PARAMS; |
93 | if (!vcpu->arch.power_off) { | 125 | if (!vcpu->arch.power_off) { |
94 | if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1) | 126 | if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1) |
95 | return PSCI_RET_ALREADY_ON; | 127 | return PSCI_RET_ALREADY_ON; |
96 | else | 128 | else |
97 | return PSCI_RET_INVALID_PARAMS; | 129 | return PSCI_RET_INVALID_PARAMS; |
98 | } | 130 | } |
99 | 131 | ||
100 | target_pc = vcpu_get_reg(source_vcpu, 2); | 132 | target_pc = smccc_get_arg2(source_vcpu); |
101 | context_id = vcpu_get_reg(source_vcpu, 3); | 133 | context_id = smccc_get_arg3(source_vcpu); |
102 | 134 | ||
103 | kvm_reset_vcpu(vcpu); | 135 | kvm_reset_vcpu(vcpu); |
104 | 136 | ||
@@ -117,7 +149,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | |||
117 | * NOTE: We always update r0 (or x0) because for PSCI v0.1 | 149 | * NOTE: We always update r0 (or x0) because for PSCI v0.1 |
118 | * the general purpose registers are undefined upon CPU_ON. | 150 | * the general purpose registers are undefined upon CPU_ON. |
119 | */ | 151 | */ |
120 | vcpu_set_reg(vcpu, 0, context_id); | 152 | smccc_set_retval(vcpu, context_id, 0, 0, 0); |
121 | vcpu->arch.power_off = false; | 153 | vcpu->arch.power_off = false; |
122 | smp_mb(); /* Make sure the above is visible */ | 154 | smp_mb(); /* Make sure the above is visible */ |
123 | 155 | ||
@@ -137,8 +169,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) | |||
137 | struct kvm *kvm = vcpu->kvm; | 169 | struct kvm *kvm = vcpu->kvm; |
138 | struct kvm_vcpu *tmp; | 170 | struct kvm_vcpu *tmp; |
139 | 171 | ||
140 | target_affinity = vcpu_get_reg(vcpu, 1); | 172 | target_affinity = smccc_get_arg1(vcpu); |
141 | lowest_affinity_level = vcpu_get_reg(vcpu, 2); | 173 | lowest_affinity_level = smccc_get_arg2(vcpu); |
142 | 174 | ||
143 | /* Determine target affinity mask */ | 175 | /* Determine target affinity mask */ |
144 | target_affinity_mask = psci_affinity_mask(lowest_affinity_level); | 176 | target_affinity_mask = psci_affinity_mask(lowest_affinity_level); |
@@ -200,18 +232,10 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu) | |||
200 | kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET); | 232 | kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET); |
201 | } | 233 | } |
202 | 234 | ||
203 | int kvm_psci_version(struct kvm_vcpu *vcpu) | ||
204 | { | ||
205 | if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) | ||
206 | return KVM_ARM_PSCI_0_2; | ||
207 | |||
208 | return KVM_ARM_PSCI_0_1; | ||
209 | } | ||
210 | |||
211 | static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) | 235 | static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) |
212 | { | 236 | { |
213 | struct kvm *kvm = vcpu->kvm; | 237 | struct kvm *kvm = vcpu->kvm; |
214 | unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); | 238 | u32 psci_fn = smccc_get_function(vcpu); |
215 | unsigned long val; | 239 | unsigned long val; |
216 | int ret = 1; | 240 | int ret = 1; |
217 | 241 | ||
@@ -221,7 +245,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) | |||
221 | * Bits[31:16] = Major Version = 0 | 245 | * Bits[31:16] = Major Version = 0 |
222 | * Bits[15:0] = Minor Version = 2 | 246 | * Bits[15:0] = Minor Version = 2 |
223 | */ | 247 | */ |
224 | val = 2; | 248 | val = KVM_ARM_PSCI_0_2; |
225 | break; | 249 | break; |
226 | case PSCI_0_2_FN_CPU_SUSPEND: | 250 | case PSCI_0_2_FN_CPU_SUSPEND: |
227 | case PSCI_0_2_FN64_CPU_SUSPEND: | 251 | case PSCI_0_2_FN64_CPU_SUSPEND: |
@@ -278,14 +302,56 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) | |||
278 | break; | 302 | break; |
279 | } | 303 | } |
280 | 304 | ||
281 | vcpu_set_reg(vcpu, 0, val); | 305 | smccc_set_retval(vcpu, val, 0, 0, 0); |
306 | return ret; | ||
307 | } | ||
308 | |||
309 | static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu) | ||
310 | { | ||
311 | u32 psci_fn = smccc_get_function(vcpu); | ||
312 | u32 feature; | ||
313 | unsigned long val; | ||
314 | int ret = 1; | ||
315 | |||
316 | switch(psci_fn) { | ||
317 | case PSCI_0_2_FN_PSCI_VERSION: | ||
318 | val = KVM_ARM_PSCI_1_0; | ||
319 | break; | ||
320 | case PSCI_1_0_FN_PSCI_FEATURES: | ||
321 | feature = smccc_get_arg1(vcpu); | ||
322 | switch(feature) { | ||
323 | case PSCI_0_2_FN_PSCI_VERSION: | ||
324 | case PSCI_0_2_FN_CPU_SUSPEND: | ||
325 | case PSCI_0_2_FN64_CPU_SUSPEND: | ||
326 | case PSCI_0_2_FN_CPU_OFF: | ||
327 | case PSCI_0_2_FN_CPU_ON: | ||
328 | case PSCI_0_2_FN64_CPU_ON: | ||
329 | case PSCI_0_2_FN_AFFINITY_INFO: | ||
330 | case PSCI_0_2_FN64_AFFINITY_INFO: | ||
331 | case PSCI_0_2_FN_MIGRATE_INFO_TYPE: | ||
332 | case PSCI_0_2_FN_SYSTEM_OFF: | ||
333 | case PSCI_0_2_FN_SYSTEM_RESET: | ||
334 | case PSCI_1_0_FN_PSCI_FEATURES: | ||
335 | case ARM_SMCCC_VERSION_FUNC_ID: | ||
336 | val = 0; | ||
337 | break; | ||
338 | default: | ||
339 | val = PSCI_RET_NOT_SUPPORTED; | ||
340 | break; | ||
341 | } | ||
342 | break; | ||
343 | default: | ||
344 | return kvm_psci_0_2_call(vcpu); | ||
345 | } | ||
346 | |||
347 | smccc_set_retval(vcpu, val, 0, 0, 0); | ||
282 | return ret; | 348 | return ret; |
283 | } | 349 | } |
284 | 350 | ||
285 | static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) | 351 | static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) |
286 | { | 352 | { |
287 | struct kvm *kvm = vcpu->kvm; | 353 | struct kvm *kvm = vcpu->kvm; |
288 | unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); | 354 | u32 psci_fn = smccc_get_function(vcpu); |
289 | unsigned long val; | 355 | unsigned long val; |
290 | 356 | ||
291 | switch (psci_fn) { | 357 | switch (psci_fn) { |
@@ -303,7 +369,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) | |||
303 | break; | 369 | break; |
304 | } | 370 | } |
305 | 371 | ||
306 | vcpu_set_reg(vcpu, 0, val); | 372 | smccc_set_retval(vcpu, val, 0, 0, 0); |
307 | return 1; | 373 | return 1; |
308 | } | 374 | } |
309 | 375 | ||
@@ -321,9 +387,11 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) | |||
321 | * Errors: | 387 | * Errors: |
322 | * -EINVAL: Unrecognized PSCI function | 388 | * -EINVAL: Unrecognized PSCI function |
323 | */ | 389 | */ |
324 | int kvm_psci_call(struct kvm_vcpu *vcpu) | 390 | static int kvm_psci_call(struct kvm_vcpu *vcpu) |
325 | { | 391 | { |
326 | switch (kvm_psci_version(vcpu)) { | 392 | switch (kvm_psci_version(vcpu, vcpu->kvm)) { |
393 | case KVM_ARM_PSCI_1_0: | ||
394 | return kvm_psci_1_0_call(vcpu); | ||
327 | case KVM_ARM_PSCI_0_2: | 395 | case KVM_ARM_PSCI_0_2: |
328 | return kvm_psci_0_2_call(vcpu); | 396 | return kvm_psci_0_2_call(vcpu); |
329 | case KVM_ARM_PSCI_0_1: | 397 | case KVM_ARM_PSCI_0_1: |
@@ -332,3 +400,30 @@ int kvm_psci_call(struct kvm_vcpu *vcpu) | |||
332 | return -EINVAL; | 400 | return -EINVAL; |
333 | }; | 401 | }; |
334 | } | 402 | } |
403 | |||
404 | int kvm_hvc_call_handler(struct kvm_vcpu *vcpu) | ||
405 | { | ||
406 | u32 func_id = smccc_get_function(vcpu); | ||
407 | u32 val = PSCI_RET_NOT_SUPPORTED; | ||
408 | u32 feature; | ||
409 | |||
410 | switch (func_id) { | ||
411 | case ARM_SMCCC_VERSION_FUNC_ID: | ||
412 | val = ARM_SMCCC_VERSION_1_1; | ||
413 | break; | ||
414 | case ARM_SMCCC_ARCH_FEATURES_FUNC_ID: | ||
415 | feature = smccc_get_arg1(vcpu); | ||
416 | switch(feature) { | ||
417 | case ARM_SMCCC_ARCH_WORKAROUND_1: | ||
418 | if (kvm_arm_harden_branch_predictor()) | ||
419 | val = 0; | ||
420 | break; | ||
421 | } | ||
422 | break; | ||
423 | default: | ||
424 | return kvm_psci_call(vcpu); | ||
425 | } | ||
426 | |||
427 | smccc_set_retval(vcpu, val, 0, 0, 0); | ||
428 | return 1; | ||
429 | } | ||