author		Linus Torvalds <torvalds@linux-foundation.org>	2018-04-13 14:24:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-04-13 14:24:18 -0400
commit		e4e57f20fa12ce044fa2b9fec204098799485539
tree		4271c5e7fb20dfee874c81d35e8b47cd3125a36e
parent		6c21e4334adaf1ea0f74349be01adddf40e36a27
parent		24534b3511828c66215fdf1533d77a7bf2e1fdb2
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull more arm64 updates from Will Deacon:
 "A few late updates to address some issues arising from conflicts with
  other trees:

   - Removal of Qualcomm-specific Spectre-v2 mitigation in favour of the
     generic SMCCC-based firmware call

   - Fix EL2 hardening capability checking, which was bodged to reduce
     conflicts with the KVM tree

   - Add some currently unused assembler macros for managing SIMD
     registers which will be used by some crypto code in the next merge
     window"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: assembler: add macros to conditionally yield the NEON under PREEMPT
  arm64: assembler: add utility macros to push/pop stack frames
  arm64: Move the content of bpi.S to hyp-entry.S
  arm64: Get rid of __smccc_workaround_1_hvc_*
  arm64: capabilities: Rework EL2 vector hardening entry
  arm64: KVM: Use SMCCC_ARCH_WORKAROUND_1 for Falkor BP hardening
-rw-r--r--	arch/arm64/include/asm/assembler.h	136
-rw-r--r--	arch/arm64/include/asm/cpucaps.h	 13
-rw-r--r--	arch/arm64/include/asm/kvm_asm.h	  2
-rw-r--r--	arch/arm64/kernel/Makefile		  2
-rw-r--r--	arch/arm64/kernel/asm-offsets.c		  3
-rw-r--r--	arch/arm64/kernel/bpi.S			102
-rw-r--r--	arch/arm64/kernel/cpu_errata.c		 97
-rw-r--r--	arch/arm64/kvm/hyp/entry.S		 12
-rw-r--r--	arch/arm64/kvm/hyp/hyp-entry.S		 64
-rw-r--r--	arch/arm64/kvm/hyp/switch.c		 10
10 files changed, 242 insertions, 199 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 053d83e8db6f..0bcc98dbba56 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -565,4 +565,140 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
 #endif
 	.endm
 
+	/*
+	 * frame_push - Push @regcount callee saved registers to the stack,
+	 *              starting at x19, as well as x29/x30, and set x29 to
+	 *              the new value of sp. Add @extra bytes of stack space
+	 *              for locals.
+	 */
+	.macro		frame_push, regcount:req, extra
+	__frame		st, \regcount, \extra
+	.endm
+
+	/*
+	 * frame_pop  - Pop the callee saved registers from the stack that were
+	 *              pushed in the most recent call to frame_push, as well
+	 *              as x29/x30 and any extra stack space that may have been
+	 *              allocated.
+	 */
+	.macro		frame_pop
+	__frame		ld
+	.endm
+
+	.macro		__frame_regs, reg1, reg2, op, num
+	.if		.Lframe_regcount == \num
+	\op\()r		\reg1, [sp, #(\num + 1) * 8]
+	.elseif		.Lframe_regcount > \num
+	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
+	.endif
+	.endm
+
+	.macro		__frame, op, regcount, extra=0
+	.ifc		\op, st
+	.if		(\regcount) < 0 || (\regcount) > 10
+	.error		"regcount should be in the range [0 ... 10]"
+	.endif
+	.if		((\extra) % 16) != 0
+	.error		"extra should be a multiple of 16 bytes"
+	.endif
+	.ifdef		.Lframe_regcount
+	.if		.Lframe_regcount != -1
+	.error		"frame_push/frame_pop may not be nested"
+	.endif
+	.endif
+	.set		.Lframe_regcount, \regcount
+	.set		.Lframe_extra, \extra
+	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
+	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
+	mov		x29, sp
+	.endif
+
+	__frame_regs	x19, x20, \op, 1
+	__frame_regs	x21, x22, \op, 3
+	__frame_regs	x23, x24, \op, 5
+	__frame_regs	x25, x26, \op, 7
+	__frame_regs	x27, x28, \op, 9
+
+	.ifc		\op, ld
+	.if		.Lframe_regcount == -1
+	.error		"frame_push/frame_pop may not be nested"
+	.endif
+	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
+	.set		.Lframe_regcount, -1
+	.endif
+	.endm
+
+/*
+ * Check whether to yield to another runnable task from kernel mode NEON code
+ * (which runs with preemption disabled).
+ *
+ * if_will_cond_yield_neon
+ *        // pre-yield patchup code
+ * do_cond_yield_neon
+ *        // post-yield patchup code
+ * endif_yield_neon	<label>
+ *
+ * where <label> is optional, and marks the point where execution will resume
+ * after a yield has been performed. If omitted, execution resumes right after
+ * the endif_yield_neon invocation. Note that the entire sequence, including
+ * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
+ * is not defined.
+ *
+ * As a convenience, in the case where no patchup code is required, the above
+ * sequence may be abbreviated to
+ *
+ * cond_yield_neon <label>
+ *
+ * Note that the patchup code does not support assembler directives that change
+ * the output section, any use of such directives is undefined.
+ *
+ * The yield itself consists of the following:
+ * - Check whether the preempt count is exactly 1, in which case disabling
+ *   preemption once will make the task preemptible. If this is not the case,
+ *   yielding is pointless.
+ * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
+ *   kernel mode NEON (which will trigger a reschedule), and branch to the
+ *   yield fixup code.
+ *
+ * This macro sequence may clobber all CPU state that is not guaranteed by the
+ * AAPCS to be preserved across an ordinary function call.
+ */
+
+	.macro		cond_yield_neon, lbl
+	if_will_cond_yield_neon
+	do_cond_yield_neon
+	endif_yield_neon	\lbl
+	.endm
+
+	.macro		if_will_cond_yield_neon
+#ifdef CONFIG_PREEMPT
+	get_thread_info	x0
+	ldr		w1, [x0, #TSK_TI_PREEMPT]
+	ldr		x0, [x0, #TSK_TI_FLAGS]
+	cmp		w1, #PREEMPT_DISABLE_OFFSET
+	csel		x0, x0, xzr, eq
+	tbnz		x0, #TIF_NEED_RESCHED, .Lyield_\@	// needs rescheduling?
+	/* fall through to endif_yield_neon */
+	.subsection	1
+.Lyield_\@ :
+#else
+	.section	".discard.cond_yield_neon", "ax"
+#endif
+	.endm
+
+	.macro		do_cond_yield_neon
+	bl		kernel_neon_end
+	bl		kernel_neon_begin
+	.endm
+
+	.macro		endif_yield_neon, lbl
+	.ifnb		\lbl
+	b		\lbl
+	.else
+	b		.Lyield_out_\@
+	.endif
+	.previous
+.Lyield_out_\@ :
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
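
A minimal usage sketch of the new stack-frame helpers, for readers following along (illustrative only, not part of the patch: the routine and the callee example_helper are hypothetical, and <linux/linkage.h> plus <asm/assembler.h> are assumed to be included). frame_push saves x29/x30 plus the requested number of callee-saved registers and points x29 at the new frame, so values stay live across calls until the matching frame_pop.

	/* Hypothetical illustration only, not part of this patch. */
ENTRY(example_call_twice)
	frame_push	2, 0		// save x29/x30 and x19-x20, no extra locals
	mov	x20, x1			// keep the second argument across the first call
	bl	example_helper		// hypothetical callee: takes x0, returns x0
	mov	x19, x0			// keep the first result across the second call
	mov	x0, x20
	bl	example_helper
	add	x0, x0, x19		// return the sum of both results
	frame_pop			// restore x19-x20 and x29/x30
	ret
ENDPROC(example_call_twice)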
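And a sketch of the NEON yield pattern described in the comment above, modelled on how a kernel-mode NEON loop is expected to use it (again hypothetical, not part of the patch). It assumes the caller has already done kernel_neon_begin(), and that TSK_TI_PREEMPT, TSK_TI_FLAGS and PREEMPT_DISABLE_OFFSET are visible via asm-offsets (PREEMPT_DISABLE_OFFSET is added to asm-offsets.c elsewhere in this merge). The pre-yield patchup spills the live NEON state, the post-yield patchup reloads it once kernel-mode NEON has been re-enabled, and the label passed to endif_yield_neon re-enters the loop.

	/* Hypothetical illustration only, not part of this patch. */
ENTRY(example_neon_fold)
	frame_push	3, 0		// x19-x21 are callee saved, so they survive the yield
	mov	x19, x0			// x19: 16-byte state buffer
	mov	x20, x1			// x20: source data
	mov	x21, x2			// x21: number of 16-byte blocks
	ld1	{v1.16b}, [x19]		// running state lives in v1

0:	ld1	{v0.16b}, [x20], #16	// fetch the next block
	eor	v1.16b, v1.16b, v0.16b	// fold it into the state
	subs	x21, x21, #1
	b.eq	1f			// all blocks done?

	if_will_cond_yield_neon
	st1	{v1.16b}, [x19]		// pre-yield: spill NEON state to memory
	do_cond_yield_neon
	ld1	{v1.16b}, [x19]		// post-yield: NEON regs were clobbered, reload
	endif_yield_neon	0b	// resume the loop after a yield
	b	0b			// no reschedule pending: continue directly

1:	st1	{v1.16b}, [x19]		// write back the final state
	frame_pop
	ret
ENDPROC(example_neon_fold)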
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index a311880feb0f..bc51b72fafd4 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -43,13 +43,12 @@
 #define ARM64_SVE				22
 #define ARM64_UNMAP_KERNEL_AT_EL0		23
 #define ARM64_HARDEN_BRANCH_PREDICTOR		24
-#define ARM64_HARDEN_BP_POST_GUEST_EXIT		25
-#define ARM64_HAS_RAS_EXTN			26
-#define ARM64_WORKAROUND_843419			27
-#define ARM64_HAS_CACHE_IDC			28
-#define ARM64_HAS_CACHE_DIC			29
-#define ARM64_HW_DBM				30
+#define ARM64_HAS_RAS_EXTN			25
+#define ARM64_WORKAROUND_843419			26
+#define ARM64_HAS_CACHE_IDC			27
+#define ARM64_HAS_CACHE_DIC			28
+#define ARM64_HW_DBM				29
 
-#define ARM64_NCAPS				31
+#define ARM64_NCAPS				30
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index d53d40704416..f6648a3e4152 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -71,8 +71,6 @@ extern u32 __kvm_get_mdcr_el2(void);
 
 extern u32 __init_stage2_translation(void);
 
-extern void __qcom_hyp_sanitize_btac_predictors(void);
-
 #else /* __ASSEMBLY__ */
 
 .macro get_host_ctxt reg, tmp
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 9b55a3f24be7..bf825f38d206 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -55,8 +55,6 @@ arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 arm64-obj-$(CONFIG_CRASH_DUMP)		+= crash_dump.o
 arm64-obj-$(CONFIG_ARM_SDE_INTERFACE)	+= sdei.o
 
-arm64-obj-$(CONFIG_KVM_INDIRECT_VECTORS)+= bpi.o
-
 obj-y					+= $(arm64-obj-y) vdso/ probes/
 obj-m					+= $(arm64-obj-m)
 head-y					:= head.o
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 78e1b0a70aaf..5bdda651bd05 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/kvm_host.h>
+#include <linux/preempt.h>
 #include <linux/suspend.h>
 #include <asm/cpufeature.h>
 #include <asm/fixmap.h>
@@ -93,6 +94,8 @@ int main(void)
   DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
   DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
   BLANK();
+  DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
+  BLANK();
   DEFINE(CLOCK_REALTIME,	CLOCK_REALTIME);
   DEFINE(CLOCK_MONOTONIC,	CLOCK_MONOTONIC);
   DEFINE(CLOCK_MONOTONIC_RAW,	CLOCK_MONOTONIC_RAW);
diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
deleted file mode 100644
index bb0b67722e86..000000000000
--- a/arch/arm64/kernel/bpi.S
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Contains CPU specific branch predictor invalidation sequences
- *
- * Copyright (C) 2018 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <linux/arm-smccc.h>
-
-#include <asm/alternative.h>
-#include <asm/mmu.h>
-
-.macro hyp_ventry
-	.align 7
-1:	.rept 27
-	nop
-	.endr
-/*
- * The default sequence is to directly branch to the KVM vectors,
- * using the computed offset. This applies for VHE as well as
- * !ARM64_HARDEN_EL2_VECTORS.
- *
- * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
- * with:
- *
- * stp	x0, x1, [sp, #-16]!
- * movz	x0, #(addr & 0xffff)
- * movk	x0, #((addr >> 16) & 0xffff), lsl #16
- * movk	x0, #((addr >> 32) & 0xffff), lsl #32
- * br	x0
- *
- * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
- * See kvm_patch_vector_branch for details.
- */
-alternative_cb	kvm_patch_vector_branch
-	b	__kvm_hyp_vector + (1b - 0b)
-	nop
-	nop
-	nop
-	nop
-alternative_cb_end
-.endm
-
-.macro generate_vectors
-0:
-	.rept 16
-	hyp_ventry
-	.endr
-	.org 0b + SZ_2K		// Safety measure
-.endm
-
-
-	.text
-	.pushsection	.hyp.text, "ax"
-
-	.align	11
-ENTRY(__bp_harden_hyp_vecs_start)
-	.rept BP_HARDEN_EL2_SLOTS
-	generate_vectors
-	.endr
-ENTRY(__bp_harden_hyp_vecs_end)
-
-	.popsection
-
-ENTRY(__qcom_hyp_sanitize_link_stack_start)
-	stp	x29, x30, [sp, #-16]!
-	.rept	16
-	bl	. + 4
-	.endr
-	ldp	x29, x30, [sp], #16
-ENTRY(__qcom_hyp_sanitize_link_stack_end)
-
-.macro smccc_workaround_1 inst
-	sub	sp, sp, #(8 * 4)
-	stp	x2, x3, [sp, #(8 * 0)]
-	stp	x0, x1, [sp, #(8 * 2)]
-	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
-	\inst	#0
-	ldp	x2, x3, [sp, #(8 * 0)]
-	ldp	x0, x1, [sp, #(8 * 2)]
-	add	sp, sp, #(8 * 4)
-.endm
-
-ENTRY(__smccc_workaround_1_smc_start)
-	smccc_workaround_1	smc
-ENTRY(__smccc_workaround_1_smc_end)
-
-ENTRY(__smccc_workaround_1_hvc_start)
-	smccc_workaround_1	hvc
-ENTRY(__smccc_workaround_1_hvc_end)
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 9262ec57f5ab..a900befadfe8 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -86,13 +86,9 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
 
 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
-#ifdef CONFIG_KVM
-extern char __qcom_hyp_sanitize_link_stack_start[];
-extern char __qcom_hyp_sanitize_link_stack_end[];
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
 extern char __smccc_workaround_1_smc_start[];
 extern char __smccc_workaround_1_smc_end[];
-extern char __smccc_workaround_1_hvc_start[];
-extern char __smccc_workaround_1_hvc_end[];
 
 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 				const char *hyp_vecs_end)
@@ -132,12 +128,8 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 	spin_unlock(&bp_lock);
 }
 #else
-#define __qcom_hyp_sanitize_link_stack_start	NULL
-#define __qcom_hyp_sanitize_link_stack_end	NULL
 #define __smccc_workaround_1_smc_start		NULL
 #define __smccc_workaround_1_smc_end		NULL
-#define __smccc_workaround_1_hvc_start		NULL
-#define __smccc_workaround_1_hvc_end		NULL
 
 static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 				      const char *hyp_vecs_start,
@@ -145,7 +137,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
 {
 	__this_cpu_write(bp_hardening_data.fn, fn);
 }
-#endif	/* CONFIG_KVM */
+#endif	/* CONFIG_KVM_INDIRECT_VECTORS */
 
 static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
 				    bp_hardening_cb_t fn,
@@ -178,12 +170,25 @@ static void call_hvc_arch_workaround_1(void)
 	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 }
 
+static void qcom_link_stack_sanitization(void)
+{
+	u64 tmp;
+
+	asm volatile("mov	%0, x30		\n"
+		     ".rept	16		\n"
+		     "bl	. + 4		\n"
+		     ".endr			\n"
+		     "mov	x30, %0		\n"
+		     : "=&r" (tmp));
+}
+
 static void
 enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
 {
 	bp_hardening_cb_t cb;
 	void *smccc_start, *smccc_end;
 	struct arm_smccc_res res;
+	u32 midr = read_cpuid_id();
 
 	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
 		return;
@@ -198,8 +203,9 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
 		if ((int)res.a0 < 0)
 			return;
 		cb = call_hvc_arch_workaround_1;
-		smccc_start = __smccc_workaround_1_hvc_start;
-		smccc_end = __smccc_workaround_1_hvc_end;
+		/* This is a guest, no need to patch KVM vectors */
+		smccc_start = NULL;
+		smccc_end = NULL;
 		break;
 
 	case PSCI_CONDUIT_SMC:
@@ -216,30 +222,14 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
 		return;
 	}
 
+	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
+		cb = qcom_link_stack_sanitization;
+
 	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
 
 	return;
 }
-
-static void qcom_link_stack_sanitization(void)
-{
-	u64 tmp;
-
-	asm volatile("mov	%0, x30		\n"
-		     ".rept	16		\n"
-		     "bl	. + 4		\n"
-		     ".endr			\n"
-		     "mov	x30, %0		\n"
-		     : "=&r" (tmp));
-}
-
-static void
-qcom_enable_link_stack_sanitization(const struct arm64_cpu_capabilities *entry)
-{
-	install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
-				__qcom_hyp_sanitize_link_stack_start,
-				__qcom_hyp_sanitize_link_stack_end);
-}
 #endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
 #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
@@ -324,33 +314,23 @@ static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
 	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
 	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
-	{},
-};
-
-static const struct midr_range qcom_bp_harden_cpus[] = {
 	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
 	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
 	{},
 };
 
-static const struct arm64_cpu_capabilities arm64_bp_harden_list[] = {
-	{
-		CAP_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
-		.cpu_enable = enable_smccc_arch_workaround_1,
-	},
-	{
-		CAP_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
-		.cpu_enable = qcom_enable_link_stack_sanitization,
-	},
+#endif
+
+#ifdef CONFIG_HARDEN_EL2_VECTORS
+
+static const struct midr_range arm64_harden_el2_vectors[] = {
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
 	{},
 };
 
 #endif
 
-#ifndef ERRATA_MIDR_ALL_VERSIONS
-#define ERRATA_MIDR_ALL_VERSIONS(x)	MIDR_ALL_VERSIONS(x)
-#endif
-
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #if defined(CONFIG_ARM64_ERRATUM_826319) || \
 	defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -495,25 +475,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
-		.matches = multi_entry_cap_matches,
-		.cpu_enable = multi_entry_cap_cpu_enable,
-		.match_list = arm64_bp_harden_list,
-	},
-	{
-		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
-		ERRATA_MIDR_RANGE_LIST(qcom_bp_harden_cpus),
+		.cpu_enable = enable_smccc_arch_workaround_1,
+		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
 	},
 #endif
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 	{
-		.desc = "Cortex-A57 EL2 vector hardening",
-		.capability = ARM64_HARDEN_EL2_VECTORS,
-		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
-	},
-	{
-		.desc = "Cortex-A72 EL2 vector hardening",
+		.desc = "EL2 vector hardening",
 		.capability = ARM64_HARDEN_EL2_VECTORS,
-		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
 	},
 #endif
 	{
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 1f458f7c3b44..e41a161d313a 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -209,15 +209,3 @@ alternative_endif
 
 	eret
 ENDPROC(__fpsimd_guest_restore)
-
-ENTRY(__qcom_hyp_sanitize_btac_predictors)
-	/**
-	 * Call SMC64 with Silicon provider serviceID 23<<8 (0xc2001700)
-	 * 0xC2000000-0xC200FFFF: assigned to SiP Service Calls
-	 * b15-b0: contains SiP functionID
-	 */
-	movz	x0, #0x1700
-	movk	x0, #0xc200, lsl #16
-	smc	#0
-	ret
-ENDPROC(__qcom_hyp_sanitize_btac_predictors)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 87dfecce82b1..bffece27b5c1 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 - ARM Ltd
+ * Copyright (C) 2015-2018 - ARM Ltd
  * Author: Marc Zyngier <marc.zyngier@arm.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -24,6 +24,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/mmu.h>
 
 	.text
 	.pushsection	.hyp.text, "ax"
@@ -237,3 +238,64 @@ ENTRY(__kvm_hyp_vector)
 	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
 	valid_vect	el1_error		// Error 32-bit EL1
 ENDPROC(__kvm_hyp_vector)
+
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+.macro hyp_ventry
+	.align 7
+1:	.rept 27
+	nop
+	.endr
+/*
+ * The default sequence is to directly branch to the KVM vectors,
+ * using the computed offset. This applies for VHE as well as
+ * !ARM64_HARDEN_EL2_VECTORS.
+ *
+ * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
+ * with:
+ *
+ * stp	x0, x1, [sp, #-16]!
+ * movz	x0, #(addr & 0xffff)
+ * movk	x0, #((addr >> 16) & 0xffff), lsl #16
+ * movk	x0, #((addr >> 32) & 0xffff), lsl #32
+ * br	x0
+ *
+ * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
+ * See kvm_patch_vector_branch for details.
+ */
+alternative_cb	kvm_patch_vector_branch
+	b	__kvm_hyp_vector + (1b - 0b)
+	nop
+	nop
+	nop
+	nop
+alternative_cb_end
+.endm
+
+.macro generate_vectors
+0:
+	.rept 16
+	hyp_ventry
+	.endr
+	.org 0b + SZ_2K		// Safety measure
+.endm
+
+	.align	11
+ENTRY(__bp_harden_hyp_vecs_start)
+	.rept BP_HARDEN_EL2_SLOTS
+	generate_vectors
+	.endr
+ENTRY(__bp_harden_hyp_vecs_end)
+
+	.popsection
+
+ENTRY(__smccc_workaround_1_smc_start)
+	sub	sp, sp, #(8 * 4)
+	stp	x2, x3, [sp, #(8 * 0)]
+	stp	x0, x1, [sp, #(8 * 2)]
+	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
+	smc	#0
+	ldp	x2, x3, [sp, #(8 * 0)]
+	ldp	x0, x1, [sp, #(8 * 2)]
+	add	sp, sp, #(8 * 4)
+ENTRY(__smccc_workaround_1_smc_end)
+#endif
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 07b572173265..d9645236e474 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -472,16 +472,6 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));
 
-	if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
-		u32 midr = read_cpuid_id();
-
-		/* Apply BTAC predictors mitigation to all Falkor chips */
-		if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
-		    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
-			__qcom_hyp_sanitize_btac_predictors();
-		}
-	}
-
 	fp_enabled = __fpsimd_enabled_nvhe();
 
 	__sysreg_save_state_nvhe(guest_ctxt);