about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/percpu.h41
-rw-r--r--arch/arm64/kernel/setup.c10
-rw-r--r--arch/arm64/kernel/smp.c6
4 files changed, 55 insertions, 3 deletions
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 519f89f5b6a3..d0ff25de67ca 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -26,7 +26,6 @@ generic-y += mman.h
26generic-y += msgbuf.h 26generic-y += msgbuf.h
27generic-y += mutex.h 27generic-y += mutex.h
28generic-y += pci.h 28generic-y += pci.h
29generic-y += percpu.h
30generic-y += poll.h 29generic-y += poll.h
31generic-y += posix_types.h 30generic-y += posix_types.h
32generic-y += resource.h 31generic-y += resource.h
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
new file mode 100644
index 000000000000..13fb0b3efc5f
--- /dev/null
+++ b/arch/arm64/include/asm/percpu.h
@@ -0,0 +1,41 @@
1/*
2 * Copyright (C) 2013 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_PERCPU_H
17#define __ASM_PERCPU_H
18
/*
 * Publish this CPU's per-CPU base offset by writing it into TPIDR_EL1
 * (the EL1 software thread-ID register); __my_cpu_offset() below reads
 * it back from there.  The asm is volatile with a "memory" clobber so
 * the write cannot be elided or reordered past surrounding per-CPU
 * accesses.
 */
19static inline void set_my_cpu_offset(unsigned long off)
20{
21	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
22}
23
/*
 * Read back the per-CPU base offset stored in TPIDR_EL1 by
 * set_my_cpu_offset().  Deliberately NOT volatile: the compiler may
 * cache the result across uses; the dummy "Q" (*sp) input ties the asm
 * to a stack-memory read so a barrier() (which clobbers memory) forces
 * a re-read where required.
 */
24static inline unsigned long __my_cpu_offset(void)
25{
26	unsigned long off;
27	register unsigned long *sp asm ("sp");
28
29	/*
30	 * We want to allow caching the value, so avoid using volatile and
31	 * instead use a fake stack read to hazard against barrier().
32	 */
33	asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp));
34
35	return off;
36}
37#define __my_cpu_offset __my_cpu_offset()
38
39#include <asm-generic/percpu.h>
40
41#endif /* __ASM_PERCPU_H */
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index bd9bbd0e44ed..97d90840a7fd 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -108,6 +108,16 @@ void __init early_print(const char *str, ...)
108 printk("%s", buf); 108 printk("%s", buf);
109} 109}
110 110
/*
 * Early-boot hook on the boot CPU: zero TPIDR_EL1 so that any per-CPU
 * access made before the real per-CPU offsets are installed (see
 * smp_prepare_boot_cpu) uses offset 0 rather than whatever stale value
 * the register held, which the note below says could otherwise hang.
 */
111void __init smp_setup_processor_id(void)
112{
113	/*
114	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
115	 * using percpu variable early, for example, lockdep will
116	 * access percpu variable inside lock_release
117	 */
118	set_my_cpu_offset(0);
119}
120
111bool arch_match_cpu_phys_id(int cpu, u64 phys_id) 121bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
112{ 122{
113 return phys_id == cpu_logical_map(cpu); 123 return phys_id == cpu_logical_map(cpu);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a0c2ca602cf8..b5d2031c12c6 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -122,8 +122,6 @@ asmlinkage void secondary_start_kernel(void)
122 struct mm_struct *mm = &init_mm; 122 struct mm_struct *mm = &init_mm;
123 unsigned int cpu = smp_processor_id(); 123 unsigned int cpu = smp_processor_id();
124 124
125 printk("CPU%u: Booted secondary processor\n", cpu);
126
127 /* 125 /*
128 * All kernel threads share the same mm context; grab a 126 * All kernel threads share the same mm context; grab a
129 * reference and switch to it. 127 * reference and switch to it.
@@ -132,6 +130,9 @@ asmlinkage void secondary_start_kernel(void)
132 current->active_mm = mm; 130 current->active_mm = mm;
133 cpumask_set_cpu(cpu, mm_cpumask(mm)); 131 cpumask_set_cpu(cpu, mm_cpumask(mm));
134 132
133 set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
134 printk("CPU%u: Booted secondary processor\n", cpu);
135
135 /* 136 /*
136 * TTBR0 is only used for the identity mapping at this stage. Make it 137 * TTBR0 is only used for the identity mapping at this stage. Make it
137 * point to zero page to avoid speculatively fetching new entries. 138 * point to zero page to avoid speculatively fetching new entries.
@@ -271,6 +272,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
271 272
272void __init smp_prepare_boot_cpu(void) 273void __init smp_prepare_boot_cpu(void)
273{ 274{
275 set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
274} 276}
275 277
276static void (*smp_cross_call)(const struct cpumask *, unsigned int); 278static void (*smp_cross_call)(const struct cpumask *, unsigned int);