aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRussell King <rmk+kernel@armlinux.org.uk>2018-07-19 07:21:31 -0400
committerRussell King <rmk+kernel@armlinux.org.uk>2018-11-12 05:51:01 -0500
commit383fb3ee8024d596f488d2dbaf45e572897acbdb (patch)
treeb67a58bae630bacced733e397dab05ceef57166f
parente209950fdd065d2cc46e6338e47e52841b830cba (diff)
ARM: spectre-v2: per-CPU vtables to work around big.Little systems
In big.Little systems, some CPUs require the Spectre workarounds in paths such as the context switch, but other CPUs do not. In order to handle these differences, we need per-CPU vtables.

We are unable to use the kernel's per-CPU variables to support this as per-CPU is not initialised at times when we need access to the vtables, so we have to use an array indexed by logical CPU number.

We use an array-of-pointers to avoid having function pointers in the kernel's read/write .data section.

Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
-rw-r--r--arch/arm/include/asm/proc-fns.h23
-rw-r--r--arch/arm/kernel/setup.c5
-rw-r--r--arch/arm/kernel/smp.c31
-rw-r--r--arch/arm/mm/proc-v7-bugs.c17
4 files changed, 61 insertions(+), 15 deletions(-)
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index c259cc49c641..e1b6f280ab08 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -104,12 +104,35 @@ extern void cpu_do_resume(void *);
 #else
 
 extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised.  We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f)	cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f)	cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+	unsigned int cpu = smp_processor_id();
+	*cpu_vtable[cpu] = *p;
+	WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+		     cpu_vtable[0]->dcache_clean_area);
+	WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+		     cpu_vtable[0]->set_pte_ext);
+}
+#else
 #define PROC_VTABLE(f)	processor.f
 #define PROC_TABLE(f)	processor.f
 static inline void init_proc_vtable(const struct processor *p)
 {
 	processor = *p;
 }
+#endif
 
 #define cpu_proc_init			PROC_VTABLE(_proc_init)
 #define cpu_check_bugs			PROC_VTABLE(check_bugs)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c214bd14a1fe..cd46a595422c 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -115,6 +115,11 @@ EXPORT_SYMBOL(elf_hwcap2);
 
 #ifdef MULTI_CPU
 struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+	[0] = &processor,
+};
+#endif
 #endif
 #ifdef MULTI_TLB
 struct cpu_tlb_fns cpu_tlb __ro_after_init;
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5ad0b67b9e33..82b879db32ee 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
 #endif
 }
 
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+	if (!cpu_vtable[cpu])
+		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+	return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+	return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
 int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	if (!smp_ops.smp_boot_secondary)
 		return -ENOSYS;
 
+	ret = secondary_biglittle_prepare(cpu);
+	if (ret)
+		return ret;
+
 	/*
 	 * We need to tell the secondary core where to find
 	 * its stack and the page tables.
@@ -360,6 +389,8 @@ asmlinkage void secondary_start_kernel(void)
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu;
 
+	secondary_biglittle_init();
+
 	/*
 	 * The identity mapping is uncached (strongly ordered), so
 	 * switch away from it before attempting any exclusive accesses.
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index 5544b82a2e7a..9a07916af8dd 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
 	case ARM_CPU_PART_CORTEX_A17:
 	case ARM_CPU_PART_CORTEX_A73:
 	case ARM_CPU_PART_CORTEX_A75:
-		if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
-			goto bl_error;
 		per_cpu(harden_branch_predictor_fn, cpu) =
 			harden_branch_predictor_bpiall;
 		spectre_v2_method = "BPIALL";
@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
 
 	case ARM_CPU_PART_CORTEX_A15:
 	case ARM_CPU_PART_BRAHMA_B15:
-		if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
-			goto bl_error;
 		per_cpu(harden_branch_predictor_fn, cpu) =
 			harden_branch_predictor_iciallu;
 		spectre_v2_method = "ICIALLU";
@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
 				      ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 			if ((int)res.a0 != 0)
 				break;
-			if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
-				goto bl_error;
 			per_cpu(harden_branch_predictor_fn, cpu) =
 				call_hvc_arch_workaround_1;
-			processor.switch_mm = cpu_v7_hvc_switch_mm;
+			cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
 			spectre_v2_method = "hypervisor";
 			break;
 
@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
 				      ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 			if ((int)res.a0 != 0)
 				break;
-			if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
-				goto bl_error;
 			per_cpu(harden_branch_predictor_fn, cpu) =
 				call_smc_arch_workaround_1;
-			processor.switch_mm = cpu_v7_smc_switch_mm;
+			cpu_do_switch_mm = cpu_v7_smc_switch_mm;
 			spectre_v2_method = "firmware";
 			break;
 
@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
 	if (spectre_v2_method)
 		pr_info("CPU%u: Spectre v2: using %s workaround\n",
 			smp_processor_id(), spectre_v2_method);
-	return;
-
-bl_error:
-	pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
-	       cpu);
 }
 #else
 static void cpu_v7_spectre_init(void)