Diffstat (limited to 'arch/arm64')
58 files changed, 1046 insertions, 491 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 04ffafb6fbe9..88c8b6c1341a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,6 +1,7 @@
 config ARM64
 	def_bool y
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
 	select ARCH_WANT_FRAME_POINTERS
@@ -14,6 +15,7 @@ config ARM64
 	select GENERIC_IOMAP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
+	select GENERIC_SCHED_CLOCK
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
 	select HARDIRQS_SW_RESEND
@@ -61,10 +63,6 @@ config LOCKDEP_SUPPORT
 config TRACE_IRQFLAGS_SUPPORT
 	def_bool y
 
-config GENERIC_LOCKBREAK
-	def_bool y
-	depends on SMP && PREEMPT
-
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
@@ -138,9 +136,13 @@ config ARM64_64K_PAGES
 	  look-up. AArch32 emulation is not available when this feature
 	  is enabled.
 
+config CPU_BIG_ENDIAN
+	bool "Build big-endian kernel"
+	help
+	  Say Y if you plan on running a kernel in big-endian mode.
+
 config SMP
 	bool "Symmetric Multi-Processing"
-	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU.  If
 	  you say N here, the kernel will run on single and
@@ -160,6 +162,13 @@ config NR_CPUS
 	default "8" if ARCH_XGENE
 	default "4"
 
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs"
+	depends on SMP
+	help
+	  Say Y here to experiment with turning CPUs off and on.  CPUs
+	  can be controlled through /sys/devices/system/cpu.
+
 source kernel/Kconfig.preempt
 
 config HZ
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index d90cf79f233a..2fceb71ac3b7 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -20,9 +20,15 @@ LIBGCC 	:= $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 KBUILD_DEFCONFIG := defconfig
 
 KBUILD_CFLAGS	+= -mgeneral-regs-only
+ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+KBUILD_CPPFLAGS	+= -mbig-endian
+AS		+= -EB
+LD		+= -EB
+else
 KBUILD_CPPFLAGS	+= -mlittle-endian
 AS		+= -EL
 LD		+= -EL
+endif
 
 comma = ,
 
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index bfdc57834929..d37d7369e260 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -103,6 +103,81 @@
 		#size-cells = <2>;
 		ranges;
 
+		clocks {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			ranges;
+			refclk: refclk {
+				compatible = "fixed-clock";
+				#clock-cells = <1>;
+				clock-frequency = <100000000>;
+				clock-output-names = "refclk";
+			};
+
+			pcppll: pcppll@17000100 {
+				compatible = "apm,xgene-pcppll-clock";
+				#clock-cells = <1>;
+				clocks = <&refclk 0>;
+				clock-names = "pcppll";
+				reg = <0x0 0x17000100 0x0 0x1000>;
+				clock-output-names = "pcppll";
+				type = <0>;
+			};
+
+			socpll: socpll@17000120 {
+				compatible = "apm,xgene-socpll-clock";
+				#clock-cells = <1>;
+				clocks = <&refclk 0>;
+				clock-names = "socpll";
+				reg = <0x0 0x17000120 0x0 0x1000>;
+				clock-output-names = "socpll";
+				type = <1>;
+			};
+
+			socplldiv2: socplldiv2 {
+				compatible = "fixed-factor-clock";
+				#clock-cells = <1>;
+				clocks = <&socpll 0>;
+				clock-names = "socplldiv2";
+				clock-mult = <1>;
+				clock-div = <2>;
+				clock-output-names = "socplldiv2";
+			};
+
+			qmlclk: qmlclk {
+				compatible = "apm,xgene-device-clock";
+				#clock-cells = <1>;
+				clocks = <&socplldiv2 0>;
+				clock-names = "qmlclk";
+				reg = <0x0 0x1703C000 0x0 0x1000>;
+				reg-names = "csr-reg";
+				clock-output-names = "qmlclk";
+			};
+
+			ethclk: ethclk {
+				compatible = "apm,xgene-device-clock";
+				#clock-cells = <1>;
+				clocks = <&socplldiv2 0>;
+				clock-names = "ethclk";
+				reg = <0x0 0x17000000 0x0 0x1000>;
+				reg-names = "div-reg";
+				divider-offset = <0x238>;
+				divider-width = <0x9>;
+				divider-shift = <0x0>;
+				clock-output-names = "ethclk";
+			};
+
+			eth8clk: eth8clk {
+				compatible = "apm,xgene-device-clock";
+				#clock-cells = <1>;
+				clocks = <&ethclk 0>;
+				clock-names = "eth8clk";
+				reg = <0x0 0x1702C000 0x0 0x1000>;
+				reg-names = "csr-reg";
+				clock-output-names = "eth8clk";
+			};
+		};
+
 		serial0: serial@1c020000 {
 			device_type = "serial";
 			compatible = "ns16550";
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 31c81e9b792e..84139be62ae6 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -26,7 +26,7 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_XGENE=y
 CONFIG_SMP=y
-CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_PREEMPT=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 79a642d199f2..519f89f5b6a3 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -50,3 +50,4 @@ generic-y += unaligned.h
 generic-y += user.h
 generic-y += vga.h
 generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index c9f1d2816c2b..9400596a0f39 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -92,19 +92,49 @@ static inline u32 arch_timer_get_cntfrq(void)
 	return val;
 }
 
-static inline void arch_counter_set_user_access(void)
+static inline u32 arch_timer_get_cntkctl(void)
 {
 	u32 cntkctl;
-
-	/* Disable user access to the timers and the physical counter. */
 	asm volatile("mrs	%0, cntkctl_el1" : "=r" (cntkctl));
-	cntkctl &= ~((3 << 8) | (1 << 0));
+	return cntkctl;
+}
 
-	/* Enable user access to the virtual counter and frequency. */
-	cntkctl |= (1 << 1);
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
 	asm volatile("msr	cntkctl_el1, %0" : : "r" (cntkctl));
 }
 
+static inline void arch_counter_set_user_access(void)
+{
+	u32 cntkctl = arch_timer_get_cntkctl();
+
+	/* Disable user access to the timers and the physical counter */
+	/* Also disable virtual event stream */
+	cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+			| ARCH_TIMER_USR_VT_ACCESS_EN
+			| ARCH_TIMER_VIRT_EVT_EN
+			| ARCH_TIMER_USR_PCT_ACCESS_EN);
+
+	/* Enable user access to the virtual counter */
+	cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+	arch_timer_set_cntkctl(cntkctl);
+}
+
+static inline void arch_timer_evtstrm_enable(int divider)
+{
+	u32 cntkctl = arch_timer_get_cntkctl();
+	cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
+	/* Set the divider and enable virtual event stream */
+	cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+			| ARCH_TIMER_VIRT_EVT_EN;
+	arch_timer_set_cntkctl(cntkctl);
+	elf_hwcap |= HWCAP_EVTSTRM;
+#ifdef CONFIG_COMPAT
+	compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
+#endif
+}
+
 static inline u64 arch_counter_get_cntvct(void)
 {
 	u64 cval;
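Note: the rewritten helpers drop the magic "(3 << 8) | (1 << 0)" shifts in favour of named ARCH_TIMER_* constants shared with the clocksource driver. For orientation, a sketch of the CNTKCTL_EL1 bits being manipulated, using the values these names carry in include/clocksource/arm_arch_timer.h (quoted here for reference only, not part of this diff):

    /* CNTKCTL_EL1 control bits assumed by the helpers above. */
    #define ARCH_TIMER_USR_PCT_ACCESS_EN  (1 << 0) /* EL0 physical counter access */
    #define ARCH_TIMER_USR_VCT_ACCESS_EN  (1 << 1) /* EL0 virtual counter access */
    #define ARCH_TIMER_VIRT_EVT_EN        (1 << 2) /* virtual event stream enable */
    #define ARCH_TIMER_EVT_TRIGGER_SHIFT  (4)      /* event stream divider field */
    #define ARCH_TIMER_EVT_TRIGGER_MASK   (0xF << ARCH_TIMER_EVT_TRIGGER_SHIFT)
    #define ARCH_TIMER_USR_VT_ACCESS_EN   (1 << 8) /* EL0 virtual timer access */
    #define ARCH_TIMER_USR_PT_ACCESS_EN   (1 << 9) /* EL0 physical timer access */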
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5aceb83b3f5c..fd3e3924041b 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -115,3 +115,34 @@ lr	.req	x30		// link register
 	.align	7
 	b	\label
 	.endm
+
+/*
+ * Select code when configured for BE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_BE(code...) code
+#else
+#define CPU_BE(code...)
+#endif
+
+/*
+ * Select code when configured for LE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_LE(code...)
+#else
+#define CPU_LE(code...) code
+#endif
+
+/*
+ * Define a macro that constructs a 64-bit value by concatenating two
+ * 32-bit registers. Note that on big endian systems the order of the
+ * registers is swapped.
+ */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+	.macro	regs_to_64, rd, lbits, hbits
+#else
+	.macro	regs_to_64, rd, hbits, lbits
+#endif
+	orr	\rd, \lbits, \hbits, lsl #32
+	.endm
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 836364468571..01de5aaa3edc 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -126,20 +126,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long tmp, tmp2;
-
-	asm volatile("// atomic_clear_mask\n"
-"1:	ldxr	%0, %2\n"
-"	bic	%0, %0, %3\n"
-"	stxr	%w1, %0, %2\n"
-"	cbnz	%w1, 1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
-	: "Ir" (mask)
-	: "cc");
-}
-
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 8a8ce0e73a38..3914c0dcd09c 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -173,4 +173,6 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 #define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
 #define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
 
+#define cmpxchg64_relaxed(ptr,o,n)	cmpxchg_local((ptr),(o),(n))
+
 #endif	/* __ASM_CMPXCHG_H */
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 899af807ef0f..fda2704b3f9f 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -26,7 +26,11 @@
 #include <linux/ptrace.h>
 
 #define COMPAT_USER_HZ		100
+#ifdef __AARCH64EB__
+#define COMPAT_UTS_MACHINE	"armv8b\0\0"
+#else
 #define COMPAT_UTS_MACHINE	"armv8l\0\0"
+#endif
 
 typedef u32		compat_size_t;
 typedef s32		compat_ssize_t;
@@ -73,13 +77,23 @@ struct compat_timeval {
 };
 
 struct compat_stat {
+#ifdef __AARCH64EB__
+	short		st_dev;
+	short		__pad1;
+#else
 	compat_dev_t	st_dev;
+#endif
 	compat_ino_t	st_ino;
 	compat_mode_t	st_mode;
 	compat_ushort_t	st_nlink;
 	__compat_uid16_t	st_uid;
 	__compat_gid16_t	st_gid;
+#ifdef __AARCH64EB__
+	short		st_rdev;
+	short		__pad2;
+#else
 	compat_dev_t	st_rdev;
+#endif
 	compat_off_t	st_size;
 	compat_off_t	st_blksize;
 	compat_off_t	st_blocks;
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
new file mode 100644
index 000000000000..c4cdb5e5b73d
--- /dev/null
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPU_OPS_H
+#define __ASM_CPU_OPS_H
+
+#include <linux/init.h>
+#include <linux/threads.h>
+
+struct device_node;
+
+/**
+ * struct cpu_operations - Callback operations for hotplugging CPUs.
+ *
+ * @name:	Name of the property as appears in a devicetree cpu node's
+ *		enable-method property.
+ * @cpu_init:	Reads any data necessary for a specific enable-method from the
+ *		devicetree, for a given cpu node and proposed logical id.
+ * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
+ *		mechanism for doing so, tests whether it is possible to boot
+ *		the given CPU.
+ * @cpu_boot:	Boots a cpu into the kernel.
+ * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
+ *		synchronisation. Called from the cpu being booted.
+ * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
+ *		reason, which will cause the hot unplug to be aborted. Called
+ *		from the cpu to be killed.
+ * @cpu_die:	Makes a cpu leave the kernel. Must not fail. Called from the
+ *		cpu being killed.
+ */
+struct cpu_operations {
+	const char	*name;
+	int		(*cpu_init)(struct device_node *, unsigned int);
+	int		(*cpu_prepare)(unsigned int);
+	int		(*cpu_boot)(unsigned int);
+	void		(*cpu_postboot)(void);
+#ifdef CONFIG_HOTPLUG_CPU
+	int		(*cpu_disable)(unsigned int cpu);
+	void		(*cpu_die)(unsigned int cpu);
+#endif
+};
+
+extern const struct cpu_operations *cpu_ops[NR_CPUS];
+extern int __init cpu_read_ops(struct device_node *dn, int cpu);
+extern void __init cpu_read_bootcpu_ops(void);
+
+#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index e7fa87f9201b..01d3aab64b79 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -90,11 +90,24 @@ typedef struct user_fpsimd_state elf_fpregset_t;
  * These are used to set parameters in the core dumps.
  */
 #define ELF_CLASS	ELFCLASS64
+#ifdef __AARCH64EB__
+#define ELF_DATA	ELFDATA2MSB
+#else
 #define ELF_DATA	ELFDATA2LSB
+#endif
 #define ELF_ARCH	EM_AARCH64
 
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
 #define ELF_PLATFORM_SIZE	16
+#ifdef __AARCH64EB__
+#define ELF_PLATFORM		("aarch64_be")
+#else
 #define ELF_PLATFORM		("aarch64")
+#endif
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
@@ -149,7 +162,12 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 #define arch_randomize_brk arch_randomize_brk
 
 #ifdef CONFIG_COMPAT
+
+#ifdef __AARCH64EB__
+#define COMPAT_ELF_PLATFORM	("v8b")
+#else
 #define COMPAT_ELF_PLATFORM	("v8l")
+#endif
 
 #define COMPAT_ELF_ET_DYN_BASE	(randomize_et_dyn(2 * TASK_SIZE_32 / 3))
 
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index e2950b098e76..6cddbb0c9f54 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,6 +30,7 @@
 #define COMPAT_HWCAP_IDIVA	(1 << 17)
 #define COMPAT_HWCAP_IDIVT	(1 << 18)
 #define COMPAT_HWCAP_IDIV	(COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_EVTSTRM	(1 << 21)
 
 #ifndef __ASSEMBLY__
 /*
@@ -37,11 +38,11 @@
  * instruction set this cpu supports.
  */
 #define ELF_HWCAP		(elf_hwcap)
-#define COMPAT_ELF_HWCAP	(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
-				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
-				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
-				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
-				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP	(compat_elf_hwcap)
+extern unsigned int compat_elf_hwcap;
+#endif
 
 extern unsigned long elf_hwcap;
 #endif
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 757c87a04531..4cc813eddacb 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -227,6 +227,7 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
 */
 extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
 extern void __iounmap(volatile void __iomem *addr);
+extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 
 #define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -236,7 +237,6 @@ extern void __iounmap(volatile void __iomem *addr);
 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
-#define ioremap_cached(addr, size)	__ioremap((addr), (size), __pgprot(PROT_NORMAL))
 #define iounmap				__iounmap
 
 #define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 0332fc077f6e..e1f7ecdde11f 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -4,6 +4,7 @@
 #include <asm-generic/irq.h>
 
 extern void (*handle_arch_irq)(struct pt_regs *);
+extern void migrate_irqs(void);
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
 #endif
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 20925bcf4e2a..37762175896f 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -33,18 +33,23 @@
 #define UL(x) _AC(x, UL)
 
 /*
- * PAGE_OFFSET - the virtual address of the start of the kernel image.
+ * PAGE_OFFSET - the virtual address of the start of the kernel image (top
+ *		 (VA_BITS - 1))
  * VA_BITS - the maximum number of bits for virtual addresses.
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 128MB of the kernel text.
 */
-#define PAGE_OFFSET		UL(0xffffffc000000000)
+#ifdef CONFIG_ARM64_64K_PAGES
+#define VA_BITS			(42)
+#else
+#define VA_BITS			(39)
+#endif
+#define PAGE_OFFSET		(UL(0xffffffffffffffff) << (VA_BITS - 1))
 #define MODULES_END		(PAGE_OFFSET)
 #define MODULES_VADDR		(MODULES_END - SZ_64M)
 #define EARLYCON_IOBASE		(MODULES_VADDR - SZ_4M)
-#define VA_BITS			(39)
 #define TASK_SIZE_64		(UL(1) << VA_BITS)
 
 #ifdef CONFIG_COMPAT
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index f214069ec5d5..9bea6e74a001 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -63,9 +63,12 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 	struct page *pte;
 
 	pte = alloc_pages(PGALLOC_GFP, 0);
-	if (pte)
-		pgtable_page_ctor(pte);
-
+	if (!pte)
+		return NULL;
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
 	return pte;
 }
 
diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h
index 0a8ed3f94e93..2593b490c56a 100644
--- a/arch/arm64/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-2level-hwdef.h
@@ -21,10 +21,10 @@
 * 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not
 * used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each
 * entry representing 512MB. The user and kernel address spaces are limited to
- * 512GB and therefore we only use 1024 entries in the PGD.
+ * 4TB in the 64KB page configuration.
 */
 #define PTRS_PER_PTE		8192
-#define PTRS_PER_PGD		1024
+#define PTRS_PER_PGD		8192
 
 /*
 * PGDIR_SHIFT determines the size a top-level page table entry can map.
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f0bebc5e22cd..17bd3af0a117 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -33,7 +33,7 @@
 /*
 * VMALLOC and SPARSEMEM_VMEMMAP ranges.
 */
-#define VMALLOC_START		UL(0xffffff8000000000)
+#define VMALLOC_START		(UL(0xffffffffffffffff) << VA_BITS)
 #define VMALLOC_END		(PAGE_OFFSET - UL(0x400000000) - SZ_64K)
 
 #define vmemmap			((struct page *)(VMALLOC_END + SZ_64K))
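Note: deriving PAGE_OFFSET (memory.h above) and VMALLOC_START (pgtable.h above) from VA_BITS keeps the two headers consistent. Worked out for both configurations (the 39-bit results match the old hard-coded constants):

    /* 4KB pages,  VA_BITS = 39:
     *   PAGE_OFFSET   = ~0UL << 38 = 0xffffffc000000000  (old hard-coded value)
     *   VMALLOC_START = ~0UL << 39 = 0xffffff8000000000  (old hard-coded value)
     * 64KB pages, VA_BITS = 42:
     *   PAGE_OFFSET   = ~0UL << 41 = 0xfffffe0000000000
     *   VMALLOC_START = ~0UL << 42 = 0xfffffc0000000000
     */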
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ab239b2c456f..45b20cd6cbca 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -107,6 +107,11 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate = COMPAT_PSR_MODE_USR;
 	if (pc & 1)
 		regs->pstate |= COMPAT_PSR_T_BIT;
+
+#ifdef __AARCH64EB__
+	regs->pstate |= COMPAT_PSR_E_BIT;
+#endif
+
 	regs->compat_sp = sp;
 }
 #endif
diff --git a/arch/arm64/include/asm/prom.h b/arch/arm64/include/asm/prom.h
deleted file mode 100644
index 68b90e682957..000000000000
--- a/arch/arm64/include/asm/prom.h
+++ /dev/null
@@ -1 +0,0 @@
-/* Empty for now */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index 0604237ecd99..e5312ea0ec1a 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,25 +14,6 @@
 #ifndef __ASM_PSCI_H
 #define __ASM_PSCI_H
 
-#define PSCI_POWER_STATE_TYPE_STANDBY		0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
-
-struct psci_power_state {
-	u16	id;
-	u8	type;
-	u8	affinity_level;
-};
-
-struct psci_operations {
-	int (*cpu_suspend)(struct psci_power_state state,
-			   unsigned long entry_point);
-	int (*cpu_off)(struct psci_power_state state);
-	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
-	int (*migrate)(unsigned long cpuid);
-};
-
-extern struct psci_operations psci_ops;
-
 int psci_init(void);
 
 #endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 0dacbbf9458b..0e7fa4963735 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -42,6 +42,7 @@
 #define COMPAT_PSR_MODE_UND	0x0000001b
 #define COMPAT_PSR_MODE_SYS	0x0000001f
 #define COMPAT_PSR_T_BIT	0x00000020
+#define COMPAT_PSR_E_BIT	0x00000200
 #define COMPAT_PSR_F_BIT	0x00000040
 #define COMPAT_PSR_I_BIT	0x00000080
 #define COMPAT_PSR_A_BIT	0x00000100
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 4b8023c5d146..a498f2cd2c2a 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -60,21 +60,14 @@ struct secondary_data {
 	void *stack;
 };
 extern struct secondary_data secondary_data;
-extern void secondary_holding_pen(void);
-extern volatile unsigned long secondary_holding_pen_release;
+extern void secondary_entry(void);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
-struct device_node;
+extern int __cpu_disable(void);
 
-struct smp_enable_ops {
-	const char	*name;
-	int		(*init_cpu)(struct device_node *, int);
-	int		(*prepare_cpu)(int);
-};
-
-extern const struct smp_enable_ops smp_spin_table_ops;
-extern const struct smp_enable_ops smp_psci_ops;
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void);
 
 #endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 0defa0728a9b..3d5cf064d7a1 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -22,17 +22,10 @@
 /*
 * Spinlock implementation.
 *
- * The old value is read exclusively and the new one, if unlocked, is written
- * exclusively. In case of failure, the loop is restarted.
- *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
- *
- * Unlocked value: 0
- * Locked value: 1
 */
 
-#define arch_spin_is_locked(x)		((x)->lock != 0)
 #define arch_spin_unlock_wait(lock) \
 	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
 
@@ -41,32 +34,51 @@
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int tmp;
+	arch_spinlock_t lockval, newval;
 
 	asm volatile(
-	"	sevl\n"
-	"1:	wfe\n"
-	"2:	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 1b\n"
-	"	stxr	%w0, %w2, %1\n"
-	"	cbnz	%w0, 2b\n"
-	: "=&r" (tmp), "+Q" (lock->lock)
-	: "r" (1)
-	: "cc", "memory");
+	/* Atomically increment the next ticket. */
+	"	prfm	pstl1strm, %3\n"
+	"1:	ldaxr	%w0, %3\n"
+	"	add	%w1, %w0, %w5\n"
+	"	stxr	%w2, %w1, %3\n"
+	"	cbnz	%w2, 1b\n"
+	/* Did we get the lock? */
+	"	eor	%w1, %w0, %w0, ror #16\n"
+	"	cbz	%w1, 3f\n"
+	/*
+	 * No: spin on the owner. Send a local event to avoid missing an
+	 * unlock before the exclusive load.
+	 */
+	"	sevl\n"
+	"2:	wfe\n"
+	"	ldaxrh	%w2, %4\n"
+	"	eor	%w1, %w2, %w0, lsr #16\n"
+	"	cbnz	%w1, 2b\n"
+	/* We got the lock. Critical section starts here. */
+	"3:"
+	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
+	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
+	: "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int tmp;
+	arch_spinlock_t lockval;
 
 	asm volatile(
-	"2:	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 1f\n"
-	"	stxr	%w0, %w2, %1\n"
-	"	cbnz	%w0, 2b\n"
-	"1:\n"
-	: "=&r" (tmp), "+Q" (lock->lock)
-	: "r" (1)
-	: "cc", "memory");
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldaxr	%w0, %2\n"
+	"	eor	%w1, %w0, %w0, ror #16\n"
+	"	cbnz	%w1, 2f\n"
+	"	add	%w0, %w0, %3\n"
+	"	stxr	%w1, %w0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	"2:"
+	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
+	: "I" (1 << TICKET_SHIFT)
+	: "memory");
 
 	return !tmp;
 }
@@ -74,9 +86,28 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	asm volatile(
-	"	stlr	%w1, %0\n"
-	: "=Q" (lock->lock) : "r" (0) : "memory");
+	"	stlrh	%w1, %0\n"
+	: "=Q" (lock->owner)
+	: "r" (lock->owner + 1)
+	: "memory");
+}
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.owner == lock.next;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+	arch_spinlock_t lockval = ACCESS_ONCE(*lock);
+	return (lockval.next - lockval.owner) > 1;
 }
+#define arch_spin_is_contended	arch_spin_is_contended
 
 /*
 * Write lock implementation.
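Note: the rewrite turns the simple locked/unlocked word into a 16-bit ticket lock: a locker atomically takes the current "next" value as its ticket, then waits until "owner" reaches that ticket; unlock is a single half-word store-release of owner + 1 (the owner/next fields are added in spinlock_types.h below). A plain-C outline of the protocol the assembly implements, with the atomicity and acquire/release ordering elided (a sketch, not a usable lock):

    void ticket_lock(arch_spinlock_t *lock)
    {
        u16 ticket = lock->next++;      /* ldaxr/stxr loop in the asm */
        while (lock->owner != ticket)   /* not our turn yet: */
            ;                           /* wfe + ldaxrh in the asm */
    }

    void ticket_unlock(arch_spinlock_t *lock)
    {
        lock->owner++;                  /* stlrh: store-release, wakes waiters */
    }

Ticket locks hand the lock out in FIFO order, and contention is directly observable as next - owner, which is consistent with dropping GENERIC_LOCKBREAK in the Kconfig hunk above and defining arch_spin_is_contended here.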
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 9a494346efed..b8d383665f56 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,14 +20,19 @@
 # error "please don't include this file directly"
 #endif
 
-/* We only require natural alignment for exclusive accesses. */
-#define __lock_aligned
+#define TICKET_SHIFT	16
 
 typedef struct {
-	volatile unsigned int lock;
-} arch_spinlock_t;
+#ifdef __AARCH64EB__
+	u16 next;
+	u16 owner;
+#else
+	u16 owner;
+	u16 next;
+#endif
+} __aligned(4) arch_spinlock_t;
 
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 , 0 }
 
 typedef struct {
 	volatile unsigned int lock;
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 89c047f9a971..70ba9d4ee978 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -59,6 +59,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
 					 unsigned int i, unsigned int n,
 					 unsigned long *args)
 {
+	if (n == 0)
+		return;
+
 	if (i + n > SYSCALL_MAX_ARGS) {
 		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
 		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -82,6 +85,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
 					 unsigned int i, unsigned int n,
 					 const unsigned long *args)
 {
+	if (n == 0)
+		return;
+
 	if (i + n > SYSCALL_MAX_ARGS) {
 		pr_warning("%s called with max args %d, handling only %d\n",
 			   __func__, i + n, SYSCALL_MAX_ARGS);
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 26e310c54344..130e2be952cf 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,7 +18,8 @@
 #ifndef __ASM__VIRT_H
 #define __ASM__VIRT_H
 
-#define BOOT_CPU_MODE_EL2	(0x0e12b007)
+#define BOOT_CPU_MODE_EL1	(0xe11)
+#define BOOT_CPU_MODE_EL2	(0xe12)
 
 #ifndef __ASSEMBLY__
 #include <asm/cacheflush.h>
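Note: with the new pair of constants, each CPU records the exception level it booted at into a two-word __boot_cpu_mode flag (see set_cpu_boot_mode_flag in the head.S hunks below): word 0 holds the EL1 value and word 1 the EL2 value, so a boot-mode mismatch between CPUs can be detected later. A hypothetical C rendering of what the assembly does (illustrative only):

    extern u32 __boot_cpu_mode[2];          /* written from head.S */

    static void record_boot_mode(u32 mode)  /* mode arrives in w20 */
    {
        if (mode == BOOT_CPU_MODE_EL2)
            __boot_cpu_mode[1] = mode;      /* add x1, x1, #4; str w20, [x1] */
        else
            __boot_cpu_mode[0] = mode;      /* str w20, [x1] */
    }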
diff --git a/arch/arm64/include/uapi/asm/byteorder.h b/arch/arm64/include/uapi/asm/byteorder.h
index 2b92046aafc5..dc19e9537f0d 100644
--- a/arch/arm64/include/uapi/asm/byteorder.h
+++ b/arch/arm64/include/uapi/asm/byteorder.h
@@ -16,6 +16,10 @@
 #ifndef __ASM_BYTEORDER_H
 #define __ASM_BYTEORDER_H
 
+#ifdef __AARCH64EB__
+#include <linux/byteorder/big_endian.h>
+#else
 #include <linux/byteorder/little_endian.h>
+#endif
 
 #endif	/* __ASM_BYTEORDER_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index eea497578b87..9b12476e9c85 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -21,6 +21,7 @@
 */
 #define HWCAP_FP		(1 << 0)
 #define HWCAP_ASIMD		(1 << 1)
+#define HWCAP_EVTSTRM		(1 << 2)
 
 
 #endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 7b4b564961d4..5ba2fd43a75b 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -9,12 +9,12 @@ AFLAGS_head.o	:= -DTEXT_OFFSET=$(TEXT_OFFSET)
 arm64-obj-y		:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\
 			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			   sys.o stacktrace.o time.o traps.o io.o vdso.o	\
-			   hyp-stub.o psci.o
+			   hyp-stub.o psci.o cpu_ops.o
 
 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o 	\
 					   sys_compat.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP)			+= smp.o smp_spin_table.o smp_psci.o
+arm64-obj-$(CONFIG_SMP)			+= smp.o smp_spin_table.o
 arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
 arm64-obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 41b4f626d554..e7ee770c0697 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -39,6 +39,7 @@ EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__copy_in_user);
 
 /* physical memory */
 EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
new file mode 100644
index 000000000000..d62d12fb36c8
--- /dev/null
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -0,0 +1,87 @@
+/*
+ * CPU kernel entry/exit control
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/string.h>
+
+extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations cpu_psci_ops;
+
+const struct cpu_operations *cpu_ops[NR_CPUS];
+
+static const struct cpu_operations *supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_SMP
+	&smp_spin_table_ops,
+	&cpu_psci_ops,
+#endif
+	NULL,
+};
+
+static const struct cpu_operations * __init cpu_get_ops(const char *name)
+{
+	const struct cpu_operations **ops = supported_cpu_ops;
+
+	while (*ops) {
+		if (!strcmp(name, (*ops)->name))
+			return *ops;
+
+		ops++;
+	}
+
+	return NULL;
+}
+
+/*
+ * Read a cpu's enable method from the device tree and record it in cpu_ops.
+ */
+int __init cpu_read_ops(struct device_node *dn, int cpu)
+{
+	const char *enable_method = of_get_property(dn, "enable-method", NULL);
+	if (!enable_method) {
+		/*
+		 * The boot CPU may not have an enable method (e.g. when
+		 * spin-table is used for secondaries). Don't warn spuriously.
+		 */
+		if (cpu != 0)
+			pr_err("%s: missing enable-method property\n",
+				dn->full_name);
+		return -ENOENT;
+	}
+
+	cpu_ops[cpu] = cpu_get_ops(enable_method);
+	if (!cpu_ops[cpu]) {
+		pr_warn("%s: unsupported enable-method property: %s\n",
+			dn->full_name, enable_method);
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+void __init cpu_read_bootcpu_ops(void)
+{
+	struct device_node *dn = of_get_cpu_node(0, NULL);
+	if (!dn) {
+		pr_err("Failed to find device node for boot cpu\n");
+		return;
+	}
+	cpu_read_ops(dn, 0);
+}
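Note: a backend is selected by matching its name against the cpu node's enable-method string and must appear in supported_cpu_ops. A minimal hypothetical backend, to show how the table is meant to be filled in (all "my_*" names invented for illustration):

    static int my_cpu_init(struct device_node *dn, unsigned int cpu)
    {
        return 0;   /* parse backend-specific properties from the cpu node */
    }

    static int my_cpu_prepare(unsigned int cpu)
    {
        return 0;   /* check the cpu can actually be booted */
    }

    static int my_cpu_boot(unsigned int cpu)
    {
        return 0;   /* kick the cpu into the kernel */
    }

    const struct cpu_operations my_cpu_ops = {
        .name        = "my-enable-method",  /* matched against the DT string */
        .cpu_init    = my_cpu_init,
        .cpu_prepare = my_cpu_prepare,
        .cpu_boot    = my_cpu_boot,
    };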
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c
index 63cfc4a43f4e..fd3993cb060f 100644
--- a/arch/arm64/kernel/cputable.c
+++ b/arch/arm64/kernel/cputable.c
@@ -22,7 +22,7 @@
 
 extern unsigned long __cpu_setup(void);
 
-struct cpu_info __initdata cpu_table[] = {
+struct cpu_info cpu_table[] = {
 	{
 		.cpu_id_val	= 0x000f0000,
 		.cpu_id_mask	= 0x000f0000,
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index cbfacf7fb438..6a0a9b132d7a 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -27,7 +27,6 @@
 #include <linux/uaccess.h>
 
 #include <asm/debug-monitors.h>
-#include <asm/local.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
 
@@ -89,8 +88,8 @@ early_param("nodebugmon", early_debug_disable);
 * Keep track of debug users on each core.
 * The ref counts are per-cpu so we use a local_t type.
 */
-static DEFINE_PER_CPU(local_t, mde_ref_count);
-static DEFINE_PER_CPU(local_t, kde_ref_count);
+static DEFINE_PER_CPU(int, mde_ref_count);
+static DEFINE_PER_CPU(int, kde_ref_count);
 
 void enable_debug_monitors(enum debug_el el)
 {
@@ -98,11 +97,11 @@ void enable_debug_monitors(enum debug_el el)
 
 	WARN_ON(preemptible());
 
-	if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1)
+	if (this_cpu_inc_return(mde_ref_count) == 1)
 		enable = DBG_MDSCR_MDE;
 
 	if (el == DBG_ACTIVE_EL1 &&
-	    local_inc_return(&__get_cpu_var(kde_ref_count)) == 1)
+	    this_cpu_inc_return(kde_ref_count) == 1)
 		enable |= DBG_MDSCR_KDE;
 
 	if (enable && debug_enabled) {
@@ -118,11 +117,11 @@ void disable_debug_monitors(enum debug_el el)
 
 	WARN_ON(preemptible());
 
-	if (local_dec_and_test(&__get_cpu_var(mde_ref_count)))
+	if (this_cpu_dec_return(mde_ref_count) == 0)
 		disable = ~DBG_MDSCR_MDE;
 
 	if (el == DBG_ACTIVE_EL1 &&
-	    local_dec_and_test(&__get_cpu_var(kde_ref_count)))
+	    this_cpu_dec_return(kde_ref_count) == 0)
 		disable &= ~DBG_MDSCR_KDE;
 
 	if (disable) {
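Note: local_t only protects against interruption on the local CPU, and these counters are only ever touched with preemption disabled (see the WARN_ON(preemptible()) above), so a plain per-cpu int with this_cpu_inc_return/this_cpu_dec_return suffices. The pattern, reduced to a sketch (illustrative names):

    static DEFINE_PER_CPU(int, ref_count);

    static void get_ref(void)   /* caller runs with preemption off */
    {
        if (this_cpu_inc_return(ref_count) == 1)
            /* first user on this cpu: enable the facility */;
    }

    static void put_ref(void)
    {
        if (this_cpu_dec_return(ref_count) == 0)
            /* last user on this cpu: disable the facility */;
    }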
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 3881fd115ebb..e1166145ca29 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -311,14 +311,14 @@ el1_irq:
 #endif
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
-	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
-	add	x0, x24, #1			// increment it
-	str	x0, [tsk, #TI_PREEMPT]
+	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
+	add	w0, w24, #1			// increment it
+	str	w0, [tsk, #TI_PREEMPT]
 #endif
 	irq_handler
 #ifdef CONFIG_PREEMPT
-	str	x24, [tsk, #TI_PREEMPT]		// restore preempt count
-	cbnz	x24, 1f				// preempt count != 0
+	str	w24, [tsk, #TI_PREEMPT]		// restore preempt count
+	cbnz	w24, 1f				// preempt count != 0
 	ldr	x0, [tsk, #TI_FLAGS]		// get flags
 	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
 	bl	el1_preempt
@@ -509,15 +509,15 @@ el0_irq_naked:
 #endif
 	get_thread_info tsk
 #ifdef CONFIG_PREEMPT
-	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
-	add	x23, x24, #1			// increment it
-	str	x23, [tsk, #TI_PREEMPT]
+	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
+	add	w23, w24, #1			// increment it
+	str	w23, [tsk, #TI_PREEMPT]
 #endif
 	irq_handler
 #ifdef CONFIG_PREEMPT
-	ldr	x0, [tsk, #TI_PREEMPT]
-	str	x24, [tsk, #TI_PREEMPT]
-	cmp	x0, x23
+	ldr	w0, [tsk, #TI_PREEMPT]
+	str	w24, [tsk, #TI_PREEMPT]
+	cmp	w0, w23
 	b.eq	1f
 	mov	x1, #0
 	str	x1, [x1]			// BUG
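Note: TI_PREEMPT is the offset of the 32-bit preempt_count field in struct thread_info (and with generic-y += preempt.h added above, the generic code expects an int there), so the old 64-bit x-register accesses touched four bytes beyond the field. Illustrative layout only; field order simplified, not the exact struct:

    struct thread_info {
        unsigned long flags;        /* TI_FLAGS: still accessed as 64-bit */
        int           preempt_count;/* TI_PREEMPT: 4 bytes; a 64-bit str
                                     * would clobber the following field,
                                     * and a 64-bit ldr picks up the wrong
                                     * word on big-endian */
        /* ... */
    };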
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 7090c126797c..7009387348b7 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -123,8 +123,9 @@ | |||
123 | 123 | ||
124 | ENTRY(stext) | 124 | ENTRY(stext) |
125 | mov x21, x0 // x21=FDT | 125 | mov x21, x0 // x21=FDT |
126 | bl el2_setup // Drop to EL1, w20=cpu_boot_mode | ||
126 | bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET | 127 | bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET |
127 | bl el2_setup // Drop to EL1 | 128 | bl set_cpu_boot_mode_flag |
128 | mrs x22, midr_el1 // x22=cpuid | 129 | mrs x22, midr_el1 // x22=cpuid |
129 | mov x0, x22 | 130 | mov x0, x22 |
130 | bl lookup_processor_type | 131 | bl lookup_processor_type |
@@ -150,21 +151,30 @@ ENDPROC(stext) | |||
150 | /* | 151 | /* |
151 | * If we're fortunate enough to boot at EL2, ensure that the world is | 152 | * If we're fortunate enough to boot at EL2, ensure that the world is |
152 | * sane before dropping to EL1. | 153 | * sane before dropping to EL1. |
154 | * | ||
155 | * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if | ||
156 | * booted in EL1 or EL2 respectively. | ||
153 | */ | 157 | */ |
154 | ENTRY(el2_setup) | 158 | ENTRY(el2_setup) |
155 | mrs x0, CurrentEL | 159 | mrs x0, CurrentEL |
156 | cmp x0, #PSR_MODE_EL2t | 160 | cmp x0, #PSR_MODE_EL2t |
157 | ccmp x0, #PSR_MODE_EL2h, #0x4, ne | 161 | ccmp x0, #PSR_MODE_EL2h, #0x4, ne |
158 | ldr x0, =__boot_cpu_mode // Compute __boot_cpu_mode | 162 | b.ne 1f |
159 | add x0, x0, x28 | 163 | mrs x0, sctlr_el2 |
160 | b.eq 1f | 164 | CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2 |
161 | str wzr, [x0] // Remember we don't have EL2... | 165 | CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2 |
166 | msr sctlr_el2, x0 | ||
167 | b 2f | ||
168 | 1: mrs x0, sctlr_el1 | ||
169 | CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1 | ||
170 | CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 | ||
171 | msr sctlr_el1, x0 | ||
172 | mov w20, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1 | ||
173 | isb | ||
162 | ret | 174 | ret |
163 | 175 | ||
164 | /* Hyp configuration. */ | 176 | /* Hyp configuration. */ |
165 | 1: ldr w1, =BOOT_CPU_MODE_EL2 | 177 | 2: mov x0, #(1 << 31) // 64-bit EL1 |
166 | str w1, [x0, #4] // This CPU has EL2 | ||
167 | mov x0, #(1 << 31) // 64-bit EL1 | ||
168 | msr hcr_el2, x0 | 178 | msr hcr_el2, x0 |
169 | 179 | ||
170 | /* Generic timers. */ | 180 | /* Generic timers. */ |
@@ -181,7 +191,8 @@ ENTRY(el2_setup) | |||
181 | 191 | ||
182 | /* sctlr_el1 */ | 192 | /* sctlr_el1 */ |
183 | mov x0, #0x0800 // Set/clear RES{1,0} bits | 193 | mov x0, #0x0800 // Set/clear RES{1,0} bits |
184 | movk x0, #0x30d0, lsl #16 | 194 | CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems |
195 | CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems | ||
185 | msr sctlr_el1, x0 | 196 | msr sctlr_el1, x0 |
186 | 197 | ||
187 | /* Coprocessor traps. */ | 198 | /* Coprocessor traps. */ |
@@ -204,10 +215,25 @@ ENTRY(el2_setup) | |||
204 | PSR_MODE_EL1h) | 215 | PSR_MODE_EL1h) |
205 | msr spsr_el2, x0 | 216 | msr spsr_el2, x0 |
206 | msr elr_el2, lr | 217 | msr elr_el2, lr |
218 | mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2 | ||
207 | eret | 219 | eret |
208 | ENDPROC(el2_setup) | 220 | ENDPROC(el2_setup) |
209 | 221 | ||
210 | /* | 222 | /* |
223 | * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed | ||
224 | * in x20. See arch/arm64/include/asm/virt.h for more info. | ||
225 | */ | ||
226 | ENTRY(set_cpu_boot_mode_flag) | ||
227 | ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode | ||
228 | add x1, x1, x28 | ||
229 | cmp w20, #BOOT_CPU_MODE_EL2 | ||
230 | b.ne 1f | ||
231 | add x1, x1, #4 | ||
232 | 1: str w20, [x1] // This CPU has booted in EL1 | ||
233 | ret | ||
234 | ENDPROC(set_cpu_boot_mode_flag) | ||
235 | |||
236 | /* | ||
211 | * We need to find out the CPU boot mode long after boot, so we need to | 237 | * We need to find out the CPU boot mode long after boot, so we need to |
212 | * store it in a writable variable. | 238 | * store it in a writable variable. |
213 | * | 239 | * |
@@ -225,7 +251,6 @@ ENTRY(__boot_cpu_mode) | |||
225 | .quad PAGE_OFFSET | 251 | .quad PAGE_OFFSET |
226 | 252 | ||
227 | #ifdef CONFIG_SMP | 253 | #ifdef CONFIG_SMP |
228 | .pushsection .smp.pen.text, "ax" | ||
229 | .align 3 | 254 | .align 3 |
230 | 1: .quad . | 255 | 1: .quad . |
231 | .quad secondary_holding_pen_release | 256 | .quad secondary_holding_pen_release |
@@ -235,8 +260,9 @@ ENTRY(__boot_cpu_mode) | |||
235 | * cores are held until we're ready for them to initialise. | 260 | * cores are held until we're ready for them to initialise. |
236 | */ | 261 | */ |
237 | ENTRY(secondary_holding_pen) | 262 | ENTRY(secondary_holding_pen) |
238 | bl __calc_phys_offset // x24=phys offset | 263 | bl el2_setup // Drop to EL1, w20=cpu_boot_mode |
239 | bl el2_setup // Drop to EL1 | 264 | bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET |
265 | bl set_cpu_boot_mode_flag | ||
240 | mrs x0, mpidr_el1 | 266 | mrs x0, mpidr_el1 |
241 | ldr x1, =MPIDR_HWID_BITMASK | 267 | ldr x1, =MPIDR_HWID_BITMASK |
242 | and x0, x0, x1 | 268 | and x0, x0, x1 |
@@ -250,7 +276,16 @@ pen: ldr x4, [x3] | |||
250 | wfe | 276 | wfe |
251 | b pen | 277 | b pen |
252 | ENDPROC(secondary_holding_pen) | 278 | ENDPROC(secondary_holding_pen) |
253 | .popsection | 279 | |
280 | /* | ||
281 | * Secondary entry point that jumps straight into the kernel. Only to | ||
282 | * be used where CPUs are brought online dynamically by the kernel. | ||
283 | */ | ||
284 | ENTRY(secondary_entry) | ||
285 | bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET | ||
286 | bl el2_setup // Drop to EL1 | ||
287 | b secondary_startup | ||
288 | ENDPROC(secondary_entry) | ||
254 | 289 | ||
255 | ENTRY(secondary_startup) | 290 | ENTRY(secondary_startup) |
256 | /* | 291 | /* |
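
For reference, the two-word __boot_cpu_mode flag that set_cpu_boot_mode_flag fills in is read back from C through helpers in arch/arm64/include/asm/virt.h (not part of this diff), roughly:

    extern u32 __boot_cpu_mode[2];  /* word 0 defaults to BOOT_CPU_MODE_EL2 and is
                                     * overwritten if any CPU enters at EL1; word 1
                                     * is written once any CPU enters at EL2. */

    /* HYP/EL2 is only usable if every CPU was booted at EL2. */
    static inline bool is_hyp_mode_available(void)
    {
            return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
                    __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
    }

    /* Detect firmware that boots CPUs in a mix of EL1 and EL2. */
    static inline bool is_hyp_mode_mismatched(void)
    {
            return __boot_cpu_mode[0] != __boot_cpu_mode[1];
    }
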
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 329218ca9ffb..ff516f6691e4 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c | |||
@@ -184,14 +184,14 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
184 | /* Breakpoint */ | 184 | /* Breakpoint */ |
185 | ctrl_reg = AARCH64_DBG_REG_BCR; | 185 | ctrl_reg = AARCH64_DBG_REG_BCR; |
186 | val_reg = AARCH64_DBG_REG_BVR; | 186 | val_reg = AARCH64_DBG_REG_BVR; |
187 | slots = __get_cpu_var(bp_on_reg); | 187 | slots = this_cpu_ptr(bp_on_reg); |
188 | max_slots = core_num_brps; | 188 | max_slots = core_num_brps; |
189 | reg_enable = !debug_info->bps_disabled; | 189 | reg_enable = !debug_info->bps_disabled; |
190 | } else { | 190 | } else { |
191 | /* Watchpoint */ | 191 | /* Watchpoint */ |
192 | ctrl_reg = AARCH64_DBG_REG_WCR; | 192 | ctrl_reg = AARCH64_DBG_REG_WCR; |
193 | val_reg = AARCH64_DBG_REG_WVR; | 193 | val_reg = AARCH64_DBG_REG_WVR; |
194 | slots = __get_cpu_var(wp_on_reg); | 194 | slots = this_cpu_ptr(wp_on_reg); |
195 | max_slots = core_num_wrps; | 195 | max_slots = core_num_wrps; |
196 | reg_enable = !debug_info->wps_disabled; | 196 | reg_enable = !debug_info->wps_disabled; |
197 | } | 197 | } |
@@ -230,12 +230,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp) | |||
230 | if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { | 230 | if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { |
231 | /* Breakpoint */ | 231 | /* Breakpoint */ |
232 | base = AARCH64_DBG_REG_BCR; | 232 | base = AARCH64_DBG_REG_BCR; |
233 | slots = __get_cpu_var(bp_on_reg); | 233 | slots = this_cpu_ptr(bp_on_reg); |
234 | max_slots = core_num_brps; | 234 | max_slots = core_num_brps; |
235 | } else { | 235 | } else { |
236 | /* Watchpoint */ | 236 | /* Watchpoint */ |
237 | base = AARCH64_DBG_REG_WCR; | 237 | base = AARCH64_DBG_REG_WCR; |
238 | slots = __get_cpu_var(wp_on_reg); | 238 | slots = this_cpu_ptr(wp_on_reg); |
239 | max_slots = core_num_wrps; | 239 | max_slots = core_num_wrps; |
240 | } | 240 | } |
241 | 241 | ||
@@ -505,11 +505,11 @@ static void toggle_bp_registers(int reg, enum debug_el el, int enable) | |||
505 | 505 | ||
506 | switch (reg) { | 506 | switch (reg) { |
507 | case AARCH64_DBG_REG_BCR: | 507 | case AARCH64_DBG_REG_BCR: |
508 | slots = __get_cpu_var(bp_on_reg); | 508 | slots = this_cpu_ptr(bp_on_reg); |
509 | max_slots = core_num_brps; | 509 | max_slots = core_num_brps; |
510 | break; | 510 | break; |
511 | case AARCH64_DBG_REG_WCR: | 511 | case AARCH64_DBG_REG_WCR: |
512 | slots = __get_cpu_var(wp_on_reg); | 512 | slots = this_cpu_ptr(wp_on_reg); |
513 | max_slots = core_num_wrps; | 513 | max_slots = core_num_wrps; |
514 | break; | 514 | break; |
515 | default: | 515 | default: |
@@ -546,7 +546,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr, | |||
546 | struct debug_info *debug_info; | 546 | struct debug_info *debug_info; |
547 | struct arch_hw_breakpoint_ctrl ctrl; | 547 | struct arch_hw_breakpoint_ctrl ctrl; |
548 | 548 | ||
549 | slots = (struct perf_event **)__get_cpu_var(bp_on_reg); | 549 | slots = this_cpu_ptr(bp_on_reg); |
550 | addr = instruction_pointer(regs); | 550 | addr = instruction_pointer(regs); |
551 | debug_info = &current->thread.debug; | 551 | debug_info = &current->thread.debug; | ||
552 | 552 | ||
@@ -596,7 +596,7 @@ unlock: | |||
596 | user_enable_single_step(current); | 596 | user_enable_single_step(current); |
597 | } else { | 597 | } else { |
598 | toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0); | 598 | toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0); |
599 | kernel_step = &__get_cpu_var(stepping_kernel_bp); | 599 | kernel_step = this_cpu_ptr(&stepping_kernel_bp); |
600 | 600 | ||
601 | if (*kernel_step != ARM_KERNEL_STEP_NONE) | 601 | if (*kernel_step != ARM_KERNEL_STEP_NONE) |
602 | return 0; | 602 | return 0; |
@@ -623,7 +623,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr, | |||
623 | struct arch_hw_breakpoint *info; | 623 | struct arch_hw_breakpoint *info; |
624 | struct arch_hw_breakpoint_ctrl ctrl; | 624 | struct arch_hw_breakpoint_ctrl ctrl; |
625 | 625 | ||
626 | slots = (struct perf_event **)__get_cpu_var(wp_on_reg); | 626 | slots = this_cpu_ptr(wp_on_reg); |
627 | debug_info = &current->thread.debug; | 627 | debug_info = &current->thread.debug; | ||
628 | 628 | ||
629 | for (i = 0; i < core_num_wrps; ++i) { | 629 | for (i = 0; i < core_num_wrps; ++i) { |
@@ -698,7 +698,7 @@ unlock: | |||
698 | user_enable_single_step(current); | 698 | user_enable_single_step(current); |
699 | } else { | 699 | } else { |
700 | toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0); | 700 | toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0); |
701 | kernel_step = &__get_cpu_var(stepping_kernel_bp); | 701 | kernel_step = this_cpu_ptr(&stepping_kernel_bp); |
702 | 702 | ||
703 | if (*kernel_step != ARM_KERNEL_STEP_NONE) | 703 | if (*kernel_step != ARM_KERNEL_STEP_NONE) |
704 | return 0; | 704 | return 0; |
@@ -722,7 +722,7 @@ int reinstall_suspended_bps(struct pt_regs *regs) | |||
722 | struct debug_info *debug_info = &current->thread.debug; | 722 | struct debug_info *debug_info = &current->thread.debug; | ||
723 | int handled_exception = 0, *kernel_step; | 723 | int handled_exception = 0, *kernel_step; |
724 | 724 | ||
725 | kernel_step = &__get_cpu_var(stepping_kernel_bp); | 725 | kernel_step = this_cpu_ptr(&stepping_kernel_bp); |
726 | 726 | ||
727 | /* | 727 | /* |
728 | * Called from single-step exception handler. | 728 | * Called from single-step exception handler. |
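
The __get_cpu_var() to this_cpu_ptr() conversions above follow the tree-wide move to the this_cpu accessors. As a rough sketch of the two idioms (not code from this diff):

    DEFINE_PER_CPU(int, stepping_kernel_bp);

    /* Old idiom: __get_cpu_var() yields an lvalue, so obtaining a pointer
     * needs an explicit '&' (or an array decay, as with bp_on_reg). */
    int *p_old = &__get_cpu_var(stepping_kernel_bp);

    /* New idiom: this_cpu_ptr() takes the address directly and lets the
     * architecture fold in the per-CPU offset more efficiently. */
    int *p_new = this_cpu_ptr(&stepping_kernel_bp);
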
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index ecb3354292ed..473e5dbf8f39 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c | |||
@@ -81,3 +81,64 @@ void __init init_IRQ(void) | |||
81 | if (!handle_arch_irq) | 81 | if (!handle_arch_irq) |
82 | panic("No interrupt controller found."); | 82 | panic("No interrupt controller found."); |
83 | } | 83 | } |
84 | |||
85 | #ifdef CONFIG_HOTPLUG_CPU | ||
86 | static bool migrate_one_irq(struct irq_desc *desc) | ||
87 | { | ||
88 | struct irq_data *d = irq_desc_get_irq_data(desc); | ||
89 | const struct cpumask *affinity = d->affinity; | ||
90 | struct irq_chip *c; | ||
91 | bool ret = false; | ||
92 | |||
93 | /* | ||
94 | * If this is a per-CPU interrupt, or the affinity does not | ||
95 | * include this CPU, then we have nothing to do. | ||
96 | */ | ||
97 | if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) | ||
98 | return false; | ||
99 | |||
100 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
101 | affinity = cpu_online_mask; | ||
102 | ret = true; | ||
103 | } | ||
104 | |||
105 | c = irq_data_get_irq_chip(d); | ||
106 | if (!c->irq_set_affinity) | ||
107 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); | ||
108 | else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) | ||
109 | cpumask_copy(d->affinity, affinity); | ||
110 | |||
111 | return ret; | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * The current CPU has been marked offline. Migrate IRQs off this CPU. | ||
116 | * If the affinity settings do not allow other CPUs, force them onto any | ||
117 | * available CPU. | ||
118 | * | ||
119 | * Note: we must iterate over all IRQs, whether they have an attached | ||
120 | * action structure or not, as we need to get chained interrupts too. | ||
121 | */ | ||
122 | void migrate_irqs(void) | ||
123 | { | ||
124 | unsigned int i; | ||
125 | struct irq_desc *desc; | ||
126 | unsigned long flags; | ||
127 | |||
128 | local_irq_save(flags); | ||
129 | |||
130 | for_each_irq_desc(i, desc) { | ||
131 | bool affinity_broken; | ||
132 | |||
133 | raw_spin_lock(&desc->lock); | ||
134 | affinity_broken = migrate_one_irq(desc); | ||
135 | raw_spin_unlock(&desc->lock); | ||
136 | |||
137 | if (affinity_broken) | ||
138 | pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", | ||
139 | i, smp_processor_id()); | ||
140 | } | ||
141 | |||
142 | local_irq_restore(flags); | ||
143 | } | ||
144 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S index 8b69ecb1d8bc..63c48ffdf230 100644 --- a/arch/arm64/kernel/kuser32.S +++ b/arch/arm64/kernel/kuser32.S | |||
@@ -27,6 +27,9 @@ | |||
27 | * | 27 | * |
28 | * See Documentation/arm/kernel_user_helpers.txt for formal definitions. | 28 | * See Documentation/arm/kernel_user_helpers.txt for formal definitions. |
29 | */ | 29 | */ |
30 | |||
31 | #include <asm/unistd32.h> | ||
32 | |||
30 | .align 5 | 33 | .align 5 |
31 | .globl __kuser_helper_start | 34 | .globl __kuser_helper_start |
32 | __kuser_helper_start: | 35 | __kuser_helper_start: |
@@ -35,33 +38,30 @@ __kuser_cmpxchg64: // 0xffff0f60 | |||
35 | .inst 0xe92d00f0 // push {r4, r5, r6, r7} | 38 | .inst 0xe92d00f0 // push {r4, r5, r6, r7} |
36 | .inst 0xe1c040d0 // ldrd r4, r5, [r0] | 39 | .inst 0xe1c040d0 // ldrd r4, r5, [r0] |
37 | .inst 0xe1c160d0 // ldrd r6, r7, [r1] | 40 | .inst 0xe1c160d0 // ldrd r6, r7, [r1] |
38 | .inst 0xf57ff05f // dmb sy | 41 | .inst 0xe1b20e9f // 1: ldaexd r0, r1, [r2] |
39 | .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2] | ||
40 | .inst 0xe0303004 // eors r3, r0, r4 | 42 | .inst 0xe0303004 // eors r3, r0, r4 |
41 | .inst 0x00313005 // eoreqs r3, r1, r5 | 43 | .inst 0x00313005 // eoreqs r3, r1, r5 |
42 | .inst 0x01a23f96 // strexdeq r3, r6, [r2] | 44 | .inst 0x01a23e96 // stlexdeq r3, r6, [r2] |
43 | .inst 0x03330001 // teqeq r3, #1 | 45 | .inst 0x03330001 // teqeq r3, #1 |
44 | .inst 0x0afffff9 // beq 1b | 46 | .inst 0x0afffff9 // beq 1b |
45 | .inst 0xf57ff05f // dmb sy | ||
46 | .inst 0xe2730000 // rsbs r0, r3, #0 | 47 | .inst 0xe2730000 // rsbs r0, r3, #0 |
47 | .inst 0xe8bd00f0 // pop {r4, r5, r6, r7} | 48 | .inst 0xe8bd00f0 // pop {r4, r5, r6, r7} |
48 | .inst 0xe12fff1e // bx lr | 49 | .inst 0xe12fff1e // bx lr |
49 | 50 | ||
50 | .align 5 | 51 | .align 5 |
51 | __kuser_memory_barrier: // 0xffff0fa0 | 52 | __kuser_memory_barrier: // 0xffff0fa0 |
52 | .inst 0xf57ff05f // dmb sy | 53 | .inst 0xf57ff05b // dmb ish |
53 | .inst 0xe12fff1e // bx lr | 54 | .inst 0xe12fff1e // bx lr |
54 | 55 | ||
55 | .align 5 | 56 | .align 5 |
56 | __kuser_cmpxchg: // 0xffff0fc0 | 57 | __kuser_cmpxchg: // 0xffff0fc0 |
57 | .inst 0xf57ff05f // dmb sy | 58 | .inst 0xe1923e9f // 1: ldaex r3, [r2] |
58 | .inst 0xe1923f9f // 1: ldrex r3, [r2] | ||
59 | .inst 0xe0533000 // subs r3, r3, r0 | 59 | .inst 0xe0533000 // subs r3, r3, r0 |
60 | .inst 0x01823f91 // strexeq r3, r1, [r2] | 60 | .inst 0x01823e91 // stlexeq r3, r1, [r2] |
61 | .inst 0x03330001 // teqeq r3, #1 | 61 | .inst 0x03330001 // teqeq r3, #1 |
62 | .inst 0x0afffffa // beq 1b | 62 | .inst 0x0afffffa // beq 1b |
63 | .inst 0xe2730000 // rsbs r0, r3, #0 | 63 | .inst 0xe2730000 // rsbs r0, r3, #0 |
64 | .inst 0xeaffffef // b <__kuser_memory_barrier> | 64 | .inst 0xe12fff1e // bx lr |
65 | 65 | ||
66 | .align 5 | 66 | .align 5 |
67 | __kuser_get_tls: // 0xffff0fe0 | 67 | __kuser_get_tls: // 0xffff0fe0 |
@@ -75,3 +75,42 @@ __kuser_helper_version: // 0xffff0ffc | |||
75 | .word ((__kuser_helper_end - __kuser_helper_start) >> 5) | 75 | .word ((__kuser_helper_end - __kuser_helper_start) >> 5) |
76 | .globl __kuser_helper_end | 76 | .globl __kuser_helper_end |
77 | __kuser_helper_end: | 77 | __kuser_helper_end: |
78 | |||
79 | /* | ||
80 | * AArch32 sigreturn code | ||
81 | * | ||
82 | * For ARM syscalls, the syscall number has to be loaded into r7. | ||
83 | * We do not support an OABI userspace. | ||
84 | * | ||
85 | * For Thumb syscalls, we also pass the syscall number via r7. We therefore | ||
86 | * need two 16-bit instructions. | ||
87 | */ | ||
88 | .globl __aarch32_sigret_code_start | ||
89 | __aarch32_sigret_code_start: | ||
90 | |||
91 | /* | ||
92 | * ARM code | ||
93 | */ | ||
94 | .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn | ||
95 | .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn | ||
96 | |||
97 | /* | ||
98 | * Thumb code | ||
99 | */ | ||
100 | .byte __NR_compat_sigreturn, 0x27 // mov r7, #__NR_compat_sigreturn | ||
101 | .byte __NR_compat_sigreturn, 0xdf // svc #__NR_compat_sigreturn | ||
102 | |||
103 | /* | ||
104 | * ARM code | ||
105 | */ | ||
106 | .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn | ||
107 | .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn | ||
108 | |||
109 | /* | ||
110 | * Thumb code | ||
111 | */ | ||
112 | .byte __NR_compat_rt_sigreturn, 0x27 // mov r7, #__NR_compat_rt_sigreturn | ||
113 | .byte __NR_compat_rt_sigreturn, 0xdf // svc #__NR_compat_rt_sigreturn | ||
114 | |||
115 | .globl __aarch32_sigret_code_end | ||
116 | __aarch32_sigret_code_end: | ||
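
Emitting the trampolines as .byte sequences rather than .word keeps the instruction stream little-endian even when the kernel itself is built big-endian, so the copy into the vectors page needs no byte-swapping. A worked example, assuming the usual AArch32 value of 119 for __NR_compat_sigreturn:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* mov r7, #imm8 (ARM, A1 encoding) is 0xe3a07000 | imm8 and is
             * stored in the instruction stream lowest byte first, which is
             * exactly the .byte sequence: imm8, 0x70, 0xa0, 0xe3 */
            uint8_t bytes[4] = { 119, 0x70, 0xa0, 0xe3 };
            uint32_t insn = bytes[0] | bytes[1] << 8 | bytes[2] << 16 |
                            (uint32_t)bytes[3] << 24;

            printf("0x%08x\n", insn);   /* prints 0xe3a07077, mov r7, #119 */
            return 0;
    }
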
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c index ca0e3d55da99..e2ad0d87721f 100644 --- a/arch/arm64/kernel/module.c +++ b/arch/arm64/kernel/module.c | |||
@@ -29,7 +29,7 @@ | |||
29 | void *module_alloc(unsigned long size) | 29 | void *module_alloc(unsigned long size) |
30 | { | 30 | { |
31 | return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, | 31 | return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, |
32 | GFP_KERNEL, PAGE_KERNEL_EXEC, -1, | 32 | GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE, |
33 | __builtin_return_address(0)); | 33 | __builtin_return_address(0)); |
34 | } | 34 | } |
35 | 35 | ||
@@ -111,6 +111,9 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm) | |||
111 | u32 immlo, immhi, lomask, himask, mask; | 111 | u32 immlo, immhi, lomask, himask, mask; |
112 | int shift; | 112 | int shift; |
113 | 113 | ||
114 | /* The instruction stream is always little endian. */ | ||
115 | insn = le32_to_cpu(insn); | ||
116 | |||
114 | switch (type) { | 117 | switch (type) { |
115 | case INSN_IMM_MOVNZ: | 118 | case INSN_IMM_MOVNZ: |
116 | /* | 119 | /* |
@@ -179,7 +182,7 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm) | |||
179 | insn &= ~(mask << shift); | 182 | insn &= ~(mask << shift); |
180 | insn |= (imm & mask) << shift; | 183 | insn |= (imm & mask) << shift; |
181 | 184 | ||
182 | return insn; | 185 | return cpu_to_le32(insn); |
183 | } | 186 | } |
184 | 187 | ||
185 | static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, | 188 | static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, |
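
The new le32_to_cpu()/cpu_to_le32() pair makes the relocation patching endian-clean: the A64 instruction stream is defined to be little-endian, so a big-endian kernel must swap the word into host order before editing an immediate field and swap it back before storing. A minimal sketch of the same pattern (patch_b_imm26 is illustrative, not a function in this file):

    static void patch_b_imm26(u32 *place, u32 imm26)
    {
            u32 insn = le32_to_cpu(*place);     /* LE instruction -> host order */

            insn &= ~0x03ffffffU;               /* clear the imm26 field of a B/BL */
            insn |= imm26 & 0x03ffffffU;
            *place = cpu_to_le32(insn);         /* host order -> LE instruction */
    }
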
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index cea1594ff933..0e63c98d224c 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -784,8 +784,8 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
784 | /* | 784 | /* |
785 | * PMXEVTYPER: Event selection reg | 785 | * PMXEVTYPER: Event selection reg |
786 | */ | 786 | */ |
787 | #define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */ | 787 | #define ARMV8_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */ |
788 | #define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ | 788 | #define ARMV8_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */ |
789 | 789 | ||
790 | /* | 790 | /* |
791 | * Event filters for PMUv3 | 791 | * Event filters for PMUv3 |
@@ -1044,7 +1044,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) | |||
1044 | */ | 1044 | */ |
1045 | regs = get_irq_regs(); | 1045 | regs = get_irq_regs(); |
1046 | 1046 | ||
1047 | cpuc = &__get_cpu_var(cpu_hw_events); | 1047 | cpuc = this_cpu_ptr(&cpu_hw_events); |
1048 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { | 1048 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
1049 | struct perf_event *event = cpuc->events[idx]; | 1049 | struct perf_event *event = cpuc->events[idx]; |
1050 | struct hw_perf_event *hwc; | 1050 | struct hw_perf_event *hwc; |
@@ -1175,7 +1175,8 @@ static void armv8pmu_reset(void *info) | |||
1175 | static int armv8_pmuv3_map_event(struct perf_event *event) | 1175 | static int armv8_pmuv3_map_event(struct perf_event *event) |
1176 | { | 1176 | { |
1177 | return map_cpu_event(event, &armv8_pmuv3_perf_map, | 1177 | return map_cpu_event(event, &armv8_pmuv3_perf_map, |
1178 | &armv8_pmuv3_perf_cache_map, 0xFF); | 1178 | &armv8_pmuv3_perf_cache_map, |
1179 | ARMV8_EVTYPE_EVENT); | ||
1179 | } | 1180 | } |
1180 | 1181 | ||
1181 | static struct arm_pmu armv8pmu = { | 1182 | static struct arm_pmu armv8pmu = { |
@@ -1257,7 +1258,7 @@ device_initcall(register_pmu_driver); | |||
1257 | 1258 | ||
1258 | static struct pmu_hw_events *armpmu_get_cpu_events(void) | 1259 | static struct pmu_hw_events *armpmu_get_cpu_events(void) |
1259 | { | 1260 | { |
1260 | return &__get_cpu_var(cpu_hw_events); | 1261 | return this_cpu_ptr(&cpu_hw_events); |
1261 | } | 1262 | } |
1262 | 1263 | ||
1263 | static void __init cpu_pmu_init(struct arm_pmu *armpmu) | 1264 | static void __init cpu_pmu_init(struct arm_pmu *armpmu) |
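
The widened mask matters because PMUv3 event numbers occupy evtCount[9:0], ten bits rather than eight; under the old 0xff mask an event number above 0xff would be silently truncated and a different event programmed. With a hypothetical 10-bit event number:

    u32 config = 0x11a;                         /* hypothetical 10-bit event number */
    u32 trunc = config & 0xff;                  /* 0x01a: silently the wrong event */
    u32 exact = config & ARMV8_EVTYPE_EVENT;    /* 0x11a: event preserved */
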
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 7ae8a1f00c3c..de17c89985db 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -102,6 +102,13 @@ void arch_cpu_idle(void) | |||
102 | local_irq_enable(); | 102 | local_irq_enable(); |
103 | } | 103 | } |
104 | 104 | ||
105 | #ifdef CONFIG_HOTPLUG_CPU | ||
106 | void arch_cpu_idle_dead(void) | ||
107 | { | ||
108 | cpu_die(); | ||
109 | } | ||
110 | #endif | ||
111 | |||
105 | void machine_shutdown(void) | 112 | void machine_shutdown(void) |
106 | { | 113 | { |
107 | #ifdef CONFIG_SMP | 114 | #ifdef CONFIG_SMP |
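
arch_cpu_idle_dead() is the generic idle loop's hook into the hotplug teardown: once a CPU has been marked offline, its idle thread calls it instead of idling and never returns. A condensed sketch of the generic caller (kernel/cpu/idle.c at the time, not part of this diff):

    static void cpu_idle_loop(void)
    {
            while (1) {
                    while (!need_resched()) {
                            /* A dying CPU parks here; on arm64 this now
                             * ends up in cpu_die() and never returns. */
                            if (cpu_is_offline(smp_processor_id()))
                                    arch_cpu_idle_dead();

                            local_irq_disable();
                            arch_cpu_idle();
                    }
                    schedule_preempt_disabled();
            }
    }
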
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 14f73c445ff5..4f97db3d7363 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c | |||
@@ -17,12 +17,32 @@ | |||
17 | 17 | ||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/of.h> | 19 | #include <linux/of.h> |
20 | #include <linux/smp.h> | ||
20 | 21 | ||
21 | #include <asm/compiler.h> | 22 | #include <asm/compiler.h> |
23 | #include <asm/cpu_ops.h> | ||
22 | #include <asm/errno.h> | 24 | #include <asm/errno.h> |
23 | #include <asm/psci.h> | 25 | #include <asm/psci.h> |
26 | #include <asm/smp_plat.h> | ||
24 | 27 | ||
25 | struct psci_operations psci_ops; | 28 | #define PSCI_POWER_STATE_TYPE_STANDBY 0 |
29 | #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1 | ||
30 | |||
31 | struct psci_power_state { | ||
32 | u16 id; | ||
33 | u8 type; | ||
34 | u8 affinity_level; | ||
35 | }; | ||
36 | |||
37 | struct psci_operations { | ||
38 | int (*cpu_suspend)(struct psci_power_state state, | ||
39 | unsigned long entry_point); | ||
40 | int (*cpu_off)(struct psci_power_state state); | ||
41 | int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); | ||
42 | int (*migrate)(unsigned long cpuid); | ||
43 | }; | ||
44 | |||
45 | static struct psci_operations psci_ops; | ||
26 | 46 | ||
27 | static int (*invoke_psci_fn)(u64, u64, u64, u64); | 47 | static int (*invoke_psci_fn)(u64, u64, u64, u64); |
28 | 48 | ||
@@ -209,3 +229,68 @@ out_put_node: | |||
209 | of_node_put(np); | 229 | of_node_put(np); |
210 | return err; | 230 | return err; |
211 | } | 231 | } |
232 | |||
233 | #ifdef CONFIG_SMP | ||
234 | |||
235 | static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu) | ||
236 | { | ||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | static int __init cpu_psci_cpu_prepare(unsigned int cpu) | ||
241 | { | ||
242 | if (!psci_ops.cpu_on) { | ||
243 | pr_err("no cpu_on method, not booting CPU%d\n", cpu); | ||
244 | return -ENODEV; | ||
245 | } | ||
246 | |||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | static int cpu_psci_cpu_boot(unsigned int cpu) | ||
251 | { | ||
252 | int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry)); | ||
253 | if (err) | ||
254 | pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err); | ||
255 | |||
256 | return err; | ||
257 | } | ||
258 | |||
259 | #ifdef CONFIG_HOTPLUG_CPU | ||
260 | static int cpu_psci_cpu_disable(unsigned int cpu) | ||
261 | { | ||
262 | /* Fail early if we don't have CPU_OFF support */ | ||
263 | if (!psci_ops.cpu_off) | ||
264 | return -EOPNOTSUPP; | ||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | static void cpu_psci_cpu_die(unsigned int cpu) | ||
269 | { | ||
270 | int ret; | ||
271 | /* | ||
272 | * There are no known implementations of PSCI actually using the | ||
273 | * power state field; pass a sensible default for now. | ||
274 | */ | ||
275 | struct psci_power_state state = { | ||
276 | .type = PSCI_POWER_STATE_TYPE_POWER_DOWN, | ||
277 | }; | ||
278 | |||
279 | ret = psci_ops.cpu_off(state); | ||
280 | |||
281 | pr_crit("psci: unable to power off CPU%u (%d)\n", cpu, ret); | ||
282 | } | ||
283 | #endif | ||
284 | |||
285 | const struct cpu_operations cpu_psci_ops = { | ||
286 | .name = "psci", | ||
287 | .cpu_init = cpu_psci_cpu_init, | ||
288 | .cpu_prepare = cpu_psci_cpu_prepare, | ||
289 | .cpu_boot = cpu_psci_cpu_boot, | ||
290 | #ifdef CONFIG_HOTPLUG_CPU | ||
291 | .cpu_disable = cpu_psci_cpu_disable, | ||
292 | .cpu_die = cpu_psci_cpu_die, | ||
293 | #endif | ||
294 | }; | ||
295 | |||
296 | #endif | ||
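
cpu_psci_ops fills in the cpu_operations interface that this series introduces to replace smp_enable_ops. Trimmed of its kerneldoc, the struct looks roughly like this (arch/arm64/include/asm/cpu_ops.h, not shown in this section):

    struct cpu_operations {
            const char      *name;
            int             (*cpu_init)(struct device_node *, unsigned int);
            int             (*cpu_prepare)(unsigned int);
            int             (*cpu_boot)(unsigned int);
            void            (*cpu_postboot)(void);
    #ifdef CONFIG_HOTPLUG_CPU
            int             (*cpu_disable)(unsigned int cpu);
            void            (*cpu_die)(unsigned int cpu);
    #endif
    };
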
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 055cfb80e05c..0bc5e4cbc017 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <asm/cputype.h> | 45 | #include <asm/cputype.h> |
46 | #include <asm/elf.h> | 46 | #include <asm/elf.h> |
47 | #include <asm/cputable.h> | 47 | #include <asm/cputable.h> |
48 | #include <asm/cpu_ops.h> | ||
48 | #include <asm/sections.h> | 49 | #include <asm/sections.h> |
49 | #include <asm/setup.h> | 50 | #include <asm/setup.h> |
50 | #include <asm/smp_plat.h> | 51 | #include <asm/smp_plat.h> |
@@ -60,6 +61,16 @@ EXPORT_SYMBOL(processor_id); | |||
60 | unsigned long elf_hwcap __read_mostly; | 61 | unsigned long elf_hwcap __read_mostly; |
61 | EXPORT_SYMBOL_GPL(elf_hwcap); | 62 | EXPORT_SYMBOL_GPL(elf_hwcap); |
62 | 63 | ||
64 | #ifdef CONFIG_COMPAT | ||
65 | #define COMPAT_ELF_HWCAP_DEFAULT \ | ||
66 | (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\ | ||
67 | COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\ | ||
68 | COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\ | ||
69 | COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\ | ||
70 | COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV) | ||
71 | unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT; | ||
72 | #endif | ||
73 | |||
63 | static const char *cpu_name; | 74 | static const char *cpu_name; |
64 | static const char *machine_name; | 75 | static const char *machine_name; |
65 | phys_addr_t __fdt_pointer __initdata; | 76 | phys_addr_t __fdt_pointer __initdata; |
@@ -97,6 +108,11 @@ void __init early_print(const char *str, ...) | |||
97 | printk("%s", buf); | 108 | printk("%s", buf); |
98 | } | 109 | } |
99 | 110 | ||
111 | bool arch_match_cpu_phys_id(int cpu, u64 phys_id) | ||
112 | { | ||
113 | return phys_id == cpu_logical_map(cpu); | ||
114 | } | ||
115 | |||
100 | static void __init setup_processor(void) | 116 | static void __init setup_processor(void) |
101 | { | 117 | { |
102 | struct cpu_info *cpu_info; | 118 | struct cpu_info *cpu_info; |
@@ -118,76 +134,24 @@ static void __init setup_processor(void) | |||
118 | printk("CPU: %s [%08x] revision %d\n", | 134 | printk("CPU: %s [%08x] revision %d\n", |
119 | cpu_name, read_cpuid_id(), read_cpuid_id() & 15); | 135 | cpu_name, read_cpuid_id(), read_cpuid_id() & 15); |
120 | 136 | ||
121 | sprintf(init_utsname()->machine, "aarch64"); | 137 | sprintf(init_utsname()->machine, ELF_PLATFORM); |
122 | elf_hwcap = 0; | 138 | elf_hwcap = 0; |
123 | } | 139 | } |
124 | 140 | ||
125 | static void __init setup_machine_fdt(phys_addr_t dt_phys) | 141 | static void __init setup_machine_fdt(phys_addr_t dt_phys) |
126 | { | 142 | { |
127 | struct boot_param_header *devtree; | 143 | if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) { |
128 | unsigned long dt_root; | ||
129 | |||
130 | /* Check we have a non-NULL DT pointer */ | ||
131 | if (!dt_phys) { | ||
132 | early_print("\n" | ||
133 | "Error: NULL or invalid device tree blob\n" | ||
134 | "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n" | ||
135 | "\nPlease check your bootloader.\n"); | ||
136 | |||
137 | while (true) | ||
138 | cpu_relax(); | ||
139 | |||
140 | } | ||
141 | |||
142 | devtree = phys_to_virt(dt_phys); | ||
143 | |||
144 | /* Check device tree validity */ | ||
145 | if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) { | ||
146 | early_print("\n" | 144 | early_print("\n" |
147 | "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n" | 145 | "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n" |
148 | "Expected 0x%x, found 0x%x\n" | 146 | "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n" |
149 | "\nPlease check your bootloader.\n", | 147 | "\nPlease check your bootloader.\n", |
150 | dt_phys, devtree, OF_DT_HEADER, | 148 | dt_phys, phys_to_virt(dt_phys)); |
151 | be32_to_cpu(devtree->magic)); | ||
152 | 149 | ||
153 | while (true) | 150 | while (true) |
154 | cpu_relax(); | 151 | cpu_relax(); |
155 | } | 152 | } |
156 | 153 | ||
157 | initial_boot_params = devtree; | 154 | machine_name = of_flat_dt_get_machine_name(); |
158 | dt_root = of_get_flat_dt_root(); | ||
159 | |||
160 | machine_name = of_get_flat_dt_prop(dt_root, "model", NULL); | ||
161 | if (!machine_name) | ||
162 | machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL); | ||
163 | if (!machine_name) | ||
164 | machine_name = "<unknown>"; | ||
165 | pr_info("Machine: %s\n", machine_name); | ||
166 | |||
167 | /* Retrieve various information from the /chosen node */ | ||
168 | of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line); | ||
169 | /* Initialize {size,address}-cells info */ | ||
170 | of_scan_flat_dt(early_init_dt_scan_root, NULL); | ||
171 | /* Setup memory, calling early_init_dt_add_memory_arch */ | ||
172 | of_scan_flat_dt(early_init_dt_scan_memory, NULL); | ||
173 | } | ||
174 | |||
175 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) | ||
176 | { | ||
177 | base &= PAGE_MASK; | ||
178 | size &= PAGE_MASK; | ||
179 | if (base + size < PHYS_OFFSET) { | ||
180 | pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", | ||
181 | base, base + size); | ||
182 | return; | ||
183 | } | ||
184 | if (base < PHYS_OFFSET) { | ||
185 | pr_warning("Ignoring memory range 0x%llx - 0x%llx\n", | ||
186 | base, PHYS_OFFSET); | ||
187 | size -= PHYS_OFFSET - base; | ||
188 | base = PHYS_OFFSET; | ||
189 | } | ||
190 | memblock_add(base, size); | ||
191 | } | 155 | } |
192 | 156 | ||
193 | /* | 157 | /* |
@@ -264,6 +228,7 @@ void __init setup_arch(char **cmdline_p) | |||
264 | psci_init(); | 228 | psci_init(); |
265 | 229 | ||
266 | cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; | 230 | cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; |
231 | cpu_read_bootcpu_ops(); | ||
267 | #ifdef CONFIG_SMP | 232 | #ifdef CONFIG_SMP |
268 | smp_init_cpus(); | 233 | smp_init_cpus(); |
269 | #endif | 234 | #endif |
@@ -304,6 +269,7 @@ subsys_initcall(topology_init); | |||
304 | static const char *hwcap_str[] = { | 269 | static const char *hwcap_str[] = { |
305 | "fp", | 270 | "fp", |
306 | "asimd", | 271 | "asimd", |
272 | "evtstrm", | ||
307 | NULL | 273 | NULL |
308 | }; | 274 | }; |
309 | 275 | ||
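
arch_match_cpu_phys_id() overrides a weak generic hook so the DT core can resolve a cpu node's "reg" value (the MPIDR hwid on arm64) to a logical CPU number; the generic caller walks the cpu nodes roughly as below (condensed from drivers/of/base.c, not part of this diff):

    struct device_node *cpun;

    for_each_node_by_type(cpun, "cpu") {
            const __be32 *cell = of_get_property(cpun, "reg", NULL);

            if (cell && arch_match_cpu_phys_id(cpu,
                            of_read_number(cell, of_n_addr_cells(cpun))))
                    return cpun;    /* this node describes logical CPU 'cpu' */
    }
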
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index e393174fe859..b3fc9f5ec6d3 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c | |||
@@ -100,34 +100,6 @@ struct compat_rt_sigframe { | |||
100 | 100 | ||
101 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 101 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
102 | 102 | ||
103 | /* | ||
104 | * For ARM syscalls, the syscall number has to be loaded into r7. | ||
105 | * We do not support an OABI userspace. | ||
106 | */ | ||
107 | #define MOV_R7_NR_SIGRETURN (0xe3a07000 | __NR_compat_sigreturn) | ||
108 | #define SVC_SYS_SIGRETURN (0xef000000 | __NR_compat_sigreturn) | ||
109 | #define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | __NR_compat_rt_sigreturn) | ||
110 | #define SVC_SYS_RT_SIGRETURN (0xef000000 | __NR_compat_rt_sigreturn) | ||
111 | |||
112 | /* | ||
113 | * For Thumb syscalls, we also pass the syscall number via r7. We therefore | ||
114 | * need two 16-bit instructions. | ||
115 | */ | ||
116 | #define SVC_THUMB_SIGRETURN (((0xdf00 | __NR_compat_sigreturn) << 16) | \ | ||
117 | 0x2700 | __NR_compat_sigreturn) | ||
118 | #define SVC_THUMB_RT_SIGRETURN (((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \ | ||
119 | 0x2700 | __NR_compat_rt_sigreturn) | ||
120 | |||
121 | const compat_ulong_t aarch32_sigret_code[6] = { | ||
122 | /* | ||
123 | * AArch32 sigreturn code. | ||
124 | * We don't construct an OABI SWI - instead we just set the imm24 field | ||
125 | * to the EABI syscall number so that we create a sane disassembly. | ||
126 | */ | ||
127 | MOV_R7_NR_SIGRETURN, SVC_SYS_SIGRETURN, SVC_THUMB_SIGRETURN, | ||
128 | MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN, | ||
129 | }; | ||
130 | |||
131 | static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set) | 103 | static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set) |
132 | { | 104 | { |
133 | compat_sigset_t cset; | 105 | compat_sigset_t cset; |
@@ -150,7 +122,7 @@ static inline int get_sigset_t(sigset_t *set, | |||
150 | return 0; | 122 | return 0; |
151 | } | 123 | } |
152 | 124 | ||
153 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) | 125 | int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) |
154 | { | 126 | { |
155 | int err; | 127 | int err; |
156 | 128 | ||
@@ -474,12 +446,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, | |||
474 | /* Check if the handler is written for ARM or Thumb */ | 446 | /* Check if the handler is written for ARM or Thumb */ |
475 | thumb = handler & 1; | 447 | thumb = handler & 1; |
476 | 448 | ||
477 | if (thumb) { | 449 | if (thumb) |
478 | spsr |= COMPAT_PSR_T_BIT; | 450 | spsr |= COMPAT_PSR_T_BIT; |
479 | spsr &= ~COMPAT_PSR_IT_MASK; | 451 | else |
480 | } else { | ||
481 | spsr &= ~COMPAT_PSR_T_BIT; | 452 | spsr &= ~COMPAT_PSR_T_BIT; |
482 | } | 453 | |
454 | /* The IT state must be cleared for both ARM and Thumb-2 */ | ||
455 | spsr &= ~COMPAT_PSR_IT_MASK; | ||
483 | 456 | ||
484 | if (ka->sa.sa_flags & SA_RESTORER) { | 457 | if (ka->sa.sa_flags & SA_RESTORER) { |
485 | retcode = ptr_to_compat(ka->sa.sa_restorer); | 458 | retcode = ptr_to_compat(ka->sa.sa_restorer); |
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 78db90dcc910..a5aeefab03c3 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/atomic.h> | 39 | #include <asm/atomic.h> |
40 | #include <asm/cacheflush.h> | 40 | #include <asm/cacheflush.h> |
41 | #include <asm/cputype.h> | 41 | #include <asm/cputype.h> |
42 | #include <asm/cpu_ops.h> | ||
42 | #include <asm/mmu_context.h> | 43 | #include <asm/mmu_context.h> |
43 | #include <asm/pgtable.h> | 44 | #include <asm/pgtable.h> |
44 | #include <asm/pgalloc.h> | 45 | #include <asm/pgalloc.h> |
@@ -54,7 +55,6 @@ | |||
54 | * where to place its SVC stack | 55 | * where to place its SVC stack |
55 | */ | 56 | */ |
56 | struct secondary_data secondary_data; | 57 | struct secondary_data secondary_data; |
57 | volatile unsigned long secondary_holding_pen_release = INVALID_HWID; | ||
58 | 58 | ||
59 | enum ipi_msg_type { | 59 | enum ipi_msg_type { |
60 | IPI_RESCHEDULE, | 60 | IPI_RESCHEDULE, |
@@ -63,61 +63,16 @@ enum ipi_msg_type { | |||
63 | IPI_CPU_STOP, | 63 | IPI_CPU_STOP, |
64 | }; | 64 | }; |
65 | 65 | ||
66 | static DEFINE_RAW_SPINLOCK(boot_lock); | ||
67 | |||
68 | /* | ||
69 | * Write secondary_holding_pen_release in a way that is guaranteed to be | ||
70 | * visible to all observers, irrespective of whether they're taking part | ||
71 | * in coherency or not. This is necessary for the hotplug code to work | ||
72 | * reliably. | ||
73 | */ | ||
74 | static void write_pen_release(u64 val) | ||
75 | { | ||
76 | void *start = (void *)&secondary_holding_pen_release; | ||
77 | unsigned long size = sizeof(secondary_holding_pen_release); | ||
78 | |||
79 | secondary_holding_pen_release = val; | ||
80 | __flush_dcache_area(start, size); | ||
81 | } | ||
82 | |||
83 | /* | 66 | /* |
84 | * Boot a secondary CPU, and assign it the specified idle task. | 67 | * Boot a secondary CPU, and assign it the specified idle task. |
85 | * This also gives us the initial stack to use for this CPU. | 68 | * This also gives us the initial stack to use for this CPU. |
86 | */ | 69 | */ |
87 | static int boot_secondary(unsigned int cpu, struct task_struct *idle) | 70 | static int boot_secondary(unsigned int cpu, struct task_struct *idle) |
88 | { | 71 | { |
89 | unsigned long timeout; | 72 | if (cpu_ops[cpu]->cpu_boot) |
90 | 73 | return cpu_ops[cpu]->cpu_boot(cpu); | |
91 | /* | ||
92 | * Set synchronisation state between this boot processor | ||
93 | * and the secondary one | ||
94 | */ | ||
95 | raw_spin_lock(&boot_lock); | ||
96 | |||
97 | /* | ||
98 | * Update the pen release flag. | ||
99 | */ | ||
100 | write_pen_release(cpu_logical_map(cpu)); | ||
101 | |||
102 | /* | ||
103 | * Send an event, causing the secondaries to read pen_release. | ||
104 | */ | ||
105 | sev(); | ||
106 | |||
107 | timeout = jiffies + (1 * HZ); | ||
108 | while (time_before(jiffies, timeout)) { | ||
109 | if (secondary_holding_pen_release == INVALID_HWID) | ||
110 | break; | ||
111 | udelay(10); | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * Now the secondary core is starting up let it run its | ||
116 | * calibrations, then wait for it to finish | ||
117 | */ | ||
118 | raw_spin_unlock(&boot_lock); | ||
119 | 74 | ||
120 | return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0; | 75 | return -EOPNOTSUPP; |
121 | } | 76 | } |
122 | 77 | ||
123 | static DECLARE_COMPLETION(cpu_running); | 78 | static DECLARE_COMPLETION(cpu_running); |
@@ -187,17 +142,13 @@ asmlinkage void secondary_start_kernel(void) | |||
187 | preempt_disable(); | 142 | preempt_disable(); |
188 | trace_hardirqs_off(); | 143 | trace_hardirqs_off(); |
189 | 144 | ||
190 | /* | 145 | if (cpu_ops[cpu]->cpu_postboot) |
191 | * Let the primary processor know we're out of the | 146 | cpu_ops[cpu]->cpu_postboot(); |
192 | * pen, then head off into the C entry point | ||
193 | */ | ||
194 | write_pen_release(INVALID_HWID); | ||
195 | 147 | ||
196 | /* | 148 | /* |
197 | * Synchronise with the boot thread. | 149 | * Enable GIC and timers. |
198 | */ | 150 | */ |
199 | raw_spin_lock(&boot_lock); | 151 | notify_cpu_starting(cpu); |
200 | raw_spin_unlock(&boot_lock); | ||
201 | 152 | ||
202 | /* | 153 | /* |
203 | * OK, now it's safe to let the boot CPU continue. Wait for | 154 | * OK, now it's safe to let the boot CPU continue. Wait for |
@@ -207,11 +158,6 @@ asmlinkage void secondary_start_kernel(void) | |||
207 | set_cpu_online(cpu, true); | 158 | set_cpu_online(cpu, true); |
208 | complete(&cpu_running); | 159 | complete(&cpu_running); |
209 | 160 | ||
210 | /* | ||
211 | * Enable GIC and timers. | ||
212 | */ | ||
213 | notify_cpu_starting(cpu); | ||
214 | |||
215 | local_irq_enable(); | 161 | local_irq_enable(); |
216 | local_fiq_enable(); | 162 | local_fiq_enable(); |
217 | 163 | ||
@@ -221,39 +167,113 @@ asmlinkage void secondary_start_kernel(void) | |||
221 | cpu_startup_entry(CPUHP_ONLINE); | 167 | cpu_startup_entry(CPUHP_ONLINE); |
222 | } | 168 | } |
223 | 169 | ||
224 | void __init smp_cpus_done(unsigned int max_cpus) | 170 | #ifdef CONFIG_HOTPLUG_CPU |
171 | static int op_cpu_disable(unsigned int cpu) | ||
225 | { | 172 | { |
226 | pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); | 173 | /* |
174 | * If we don't have a cpu_die method, abort before we reach the point | ||
175 | * of no return. CPU0 may not have a cpu_ops, so test for it. | ||
176 | */ | ||
177 | if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die) | ||
178 | return -EOPNOTSUPP; | ||
179 | |||
180 | /* | ||
181 | * We may need to abort a hot unplug for some other mechanism-specific | ||
182 | * reason. | ||
183 | */ | ||
184 | if (cpu_ops[cpu]->cpu_disable) | ||
185 | return cpu_ops[cpu]->cpu_disable(cpu); | ||
186 | |||
187 | return 0; | ||
227 | } | 188 | } |
228 | 189 | ||
229 | void __init smp_prepare_boot_cpu(void) | 190 | /* |
191 | * __cpu_disable runs on the processor to be shut down. | ||
192 | */ | ||
193 | int __cpu_disable(void) | ||
230 | { | 194 | { |
231 | } | 195 | unsigned int cpu = smp_processor_id(); |
196 | int ret; | ||
232 | 197 | ||
233 | static void (*smp_cross_call)(const struct cpumask *, unsigned int); | 198 | ret = op_cpu_disable(cpu); |
199 | if (ret) | ||
200 | return ret; | ||
234 | 201 | ||
235 | static const struct smp_enable_ops *enable_ops[] __initconst = { | 202 | /* |
236 | &smp_spin_table_ops, | 203 | * Take this CPU offline. Once we clear this, we can't return, |
237 | &smp_psci_ops, | 204 | * and we must not schedule until we're ready to give up the cpu. |
238 | NULL, | 205 | */ |
239 | }; | 206 | set_cpu_online(cpu, false); |
207 | |||
208 | /* | ||
209 | * OK - migrate IRQs away from this CPU | ||
210 | */ | ||
211 | migrate_irqs(); | ||
240 | 212 | ||
241 | static const struct smp_enable_ops *smp_enable_ops[NR_CPUS]; | 213 | /* |
214 | * Remove this CPU from the vm mask set of all processes. | ||
215 | */ | ||
216 | clear_tasks_mm_cpumask(cpu); | ||
242 | 217 | ||
243 | static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name) | 218 | return 0; |
244 | { | 219 | } |
245 | const struct smp_enable_ops **ops = enable_ops; | ||
246 | 220 | ||
247 | while (*ops) { | 221 | static DECLARE_COMPLETION(cpu_died); |
248 | if (!strcmp(name, (*ops)->name)) | ||
249 | return *ops; | ||
250 | 222 | ||
251 | ops++; | 223 | /* |
224 | * Called on the thread which is asking for a CPU to be shut down - | ||
225 | * waits until shutdown has completed, or it times out. | ||
226 | */ | ||
227 | void __cpu_die(unsigned int cpu) | ||
228 | { | ||
229 | if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) { | ||
230 | pr_crit("CPU%u: cpu didn't die\n", cpu); | ||
231 | return; | ||
252 | } | 232 | } |
233 | pr_notice("CPU%u: shutdown\n", cpu); | ||
234 | } | ||
235 | |||
236 | /* | ||
237 | * Called from the idle thread for the CPU which has been shut down. | ||
238 | * | ||
239 | * Note that we disable IRQs here, but do not re-enable them | ||
240 | * before returning to the caller. This is also the behaviour | ||
241 | * of the other hotplug-cpu capable cores, so presumably coming | ||
242 | * out of idle fixes this. | ||
243 | */ | ||
244 | void cpu_die(void) | ||
245 | { | ||
246 | unsigned int cpu = smp_processor_id(); | ||
247 | |||
248 | idle_task_exit(); | ||
253 | 249 | ||
254 | return NULL; | 250 | local_irq_disable(); |
251 | |||
252 | /* Tell __cpu_die() that this CPU is now safe to dispose of */ | ||
253 | complete(&cpu_died); | ||
254 | |||
255 | /* | ||
256 | * Actually shut down the CPU. This must never fail. The specific hotplug | ||
257 | * mechanism must perform all required cache maintenance to ensure that | ||
258 | * no dirty lines are lost in the process of shutting down the CPU. | ||
259 | */ | ||
260 | cpu_ops[cpu]->cpu_die(cpu); | ||
261 | |||
262 | BUG(); | ||
263 | } | ||
264 | #endif | ||
265 | |||
266 | void __init smp_cpus_done(unsigned int max_cpus) | ||
267 | { | ||
268 | pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); | ||
255 | } | 269 | } |
256 | 270 | ||
271 | void __init smp_prepare_boot_cpu(void) | ||
272 | { | ||
273 | } | ||
274 | |||
275 | static void (*smp_cross_call)(const struct cpumask *, unsigned int); | ||
276 | |||
257 | /* | 277 | /* |
258 | * Enumerate the possible CPU set from the device tree and build the | 278 | * Enumerate the possible CPU set from the device tree and build the |
259 | * cpu logical map array containing MPIDR values related to logical | 279 | * cpu logical map array containing MPIDR values related to logical |
@@ -261,9 +281,8 @@ static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name) | |||
261 | */ | 281 | */ |
262 | void __init smp_init_cpus(void) | 282 | void __init smp_init_cpus(void) |
263 | { | 283 | { |
264 | const char *enable_method; | ||
265 | struct device_node *dn = NULL; | 284 | struct device_node *dn = NULL; |
266 | int i, cpu = 1; | 285 | unsigned int i, cpu = 1; |
267 | bool bootcpu_valid = false; | 286 | bool bootcpu_valid = false; |
268 | 287 | ||
269 | while ((dn = of_find_node_by_type(dn, "cpu"))) { | 288 | while ((dn = of_find_node_by_type(dn, "cpu"))) { |
@@ -332,25 +351,10 @@ void __init smp_init_cpus(void) | |||
332 | if (cpu >= NR_CPUS) | 351 | if (cpu >= NR_CPUS) |
333 | goto next; | 352 | goto next; |
334 | 353 | ||
335 | /* | 354 | if (cpu_read_ops(dn, cpu) != 0) |
336 | * We currently support only the "spin-table" enable-method. | ||
337 | */ | ||
338 | enable_method = of_get_property(dn, "enable-method", NULL); | ||
339 | if (!enable_method) { | ||
340 | pr_err("%s: missing enable-method property\n", | ||
341 | dn->full_name); | ||
342 | goto next; | 355 | goto next; |
343 | } | ||
344 | |||
345 | smp_enable_ops[cpu] = smp_get_enable_ops(enable_method); | ||
346 | |||
347 | if (!smp_enable_ops[cpu]) { | ||
348 | pr_err("%s: invalid enable-method property: %s\n", | ||
349 | dn->full_name, enable_method); | ||
350 | goto next; | ||
351 | } | ||
352 | 356 | ||
353 | if (smp_enable_ops[cpu]->init_cpu(dn, cpu)) | 357 | if (cpu_ops[cpu]->cpu_init(dn, cpu)) |
354 | goto next; | 358 | goto next; |
355 | 359 | ||
356 | pr_debug("cpu logical map 0x%llx\n", hwid); | 360 | pr_debug("cpu logical map 0x%llx\n", hwid); |
@@ -380,8 +384,8 @@ next: | |||
380 | 384 | ||
381 | void __init smp_prepare_cpus(unsigned int max_cpus) | 385 | void __init smp_prepare_cpus(unsigned int max_cpus) |
382 | { | 386 | { |
383 | int cpu, err; | 387 | int err; |
384 | unsigned int ncores = num_possible_cpus(); | 388 | unsigned int cpu, ncores = num_possible_cpus(); |
385 | 389 | ||
386 | /* | 390 | /* |
387 | * are we trying to boot more cores than exist? | 391 | * are we trying to boot more cores than exist? |
@@ -408,10 +412,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
408 | if (cpu == smp_processor_id()) | 412 | if (cpu == smp_processor_id()) |
409 | continue; | 413 | continue; |
410 | 414 | ||
411 | if (!smp_enable_ops[cpu]) | 415 | if (!cpu_ops[cpu]) |
412 | continue; | 416 | continue; |
413 | 417 | ||
414 | err = smp_enable_ops[cpu]->prepare_cpu(cpu); | 418 | err = cpu_ops[cpu]->cpu_prepare(cpu); |
415 | if (err) | 419 | if (err) |
416 | continue; | 420 | continue; |
417 | 421 | ||
@@ -451,7 +455,7 @@ void show_ipi_list(struct seq_file *p, int prec) | |||
451 | for (i = 0; i < NR_IPI; i++) { | 455 | for (i = 0; i < NR_IPI; i++) { |
452 | seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE, | 456 | seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE, |
453 | prec >= 4 ? " " : ""); | 457 | prec >= 4 ? " " : ""); |
454 | for_each_present_cpu(cpu) | 458 | for_each_online_cpu(cpu) |
455 | seq_printf(p, "%10u ", | 459 | seq_printf(p, "%10u ", |
456 | __get_irq_stat(cpu, ipi_irqs[i])); | 460 | __get_irq_stat(cpu, ipi_irqs[i])); |
457 | seq_printf(p, " %s\n", ipi_types[i]); | 461 | seq_printf(p, " %s\n", ipi_types[i]); |
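
The cpu_ops[] array consulted throughout this file is populated from each cpu node's enable-method string by new common code that replaces the enable_ops table removed above; roughly (arch/arm64/kernel/cpu_ops.c, added by this series but not shown in this section):

    const struct cpu_operations *cpu_ops[NR_CPUS];

    static const struct cpu_operations *supported_cpu_ops[] __initconst = {
    #ifdef CONFIG_SMP
            &smp_spin_table_ops,
            &cpu_psci_ops,
    #endif
            NULL,
    };

    static const struct cpu_operations * __init cpu_get_ops(const char *name)
    {
            const struct cpu_operations **ops = supported_cpu_ops;

            while (*ops) {
                    if (!strcmp(name, (*ops)->name))
                            return *ops;
                    ops++;
            }

            return NULL;
    }
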
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c deleted file mode 100644 index 0c533301be77..000000000000 --- a/arch/arm64/kernel/smp_psci.c +++ /dev/null | |||
@@ -1,53 +0,0 @@ | |||
1 | /* | ||
2 | * PSCI SMP initialisation | ||
3 | * | ||
4 | * Copyright (C) 2013 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/smp.h> | ||
22 | |||
23 | #include <asm/psci.h> | ||
24 | #include <asm/smp_plat.h> | ||
25 | |||
26 | static int __init smp_psci_init_cpu(struct device_node *dn, int cpu) | ||
27 | { | ||
28 | return 0; | ||
29 | } | ||
30 | |||
31 | static int __init smp_psci_prepare_cpu(int cpu) | ||
32 | { | ||
33 | int err; | ||
34 | |||
35 | if (!psci_ops.cpu_on) { | ||
36 | pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu); | ||
37 | return -ENODEV; | ||
38 | } | ||
39 | |||
40 | err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen)); | ||
41 | if (err) { | ||
42 | pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err); | ||
43 | return err; | ||
44 | } | ||
45 | |||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | const struct smp_enable_ops smp_psci_ops __initconst = { | ||
50 | .name = "psci", | ||
51 | .init_cpu = smp_psci_init_cpu, | ||
52 | .prepare_cpu = smp_psci_prepare_cpu, | ||
53 | }; | ||
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index 7c35fa682f76..44c22805d2e2 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c | |||
@@ -16,15 +16,39 @@ | |||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/delay.h> | ||
19 | #include <linux/init.h> | 20 | #include <linux/init.h> |
20 | #include <linux/of.h> | 21 | #include <linux/of.h> |
21 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
22 | 23 | ||
23 | #include <asm/cacheflush.h> | 24 | #include <asm/cacheflush.h> |
25 | #include <asm/cpu_ops.h> | ||
26 | #include <asm/cputype.h> | ||
27 | #include <asm/smp_plat.h> | ||
28 | |||
29 | extern void secondary_holding_pen(void); | ||
30 | volatile unsigned long secondary_holding_pen_release = INVALID_HWID; | ||
24 | 31 | ||
25 | static phys_addr_t cpu_release_addr[NR_CPUS]; | 32 | static phys_addr_t cpu_release_addr[NR_CPUS]; |
33 | static DEFINE_RAW_SPINLOCK(boot_lock); | ||
34 | |||
35 | /* | ||
36 | * Write secondary_holding_pen_release in a way that is guaranteed to be | ||
37 | * visible to all observers, irrespective of whether they're taking part | ||
38 | * in coherency or not. This is necessary for the hotplug code to work | ||
39 | * reliably. | ||
40 | */ | ||
41 | static void write_pen_release(u64 val) | ||
42 | { | ||
43 | void *start = (void *)&secondary_holding_pen_release; | ||
44 | unsigned long size = sizeof(secondary_holding_pen_release); | ||
26 | 45 | ||
27 | static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu) | 46 | secondary_holding_pen_release = val; |
47 | __flush_dcache_area(start, size); | ||
48 | } | ||
49 | |||
50 | |||
51 | static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu) | ||
28 | { | 52 | { |
29 | /* | 53 | /* |
30 | * Determine the address from which the CPU is polling. | 54 | * Determine the address from which the CPU is polling. |
@@ -40,7 +64,7 @@ static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu) | |||
40 | return 0; | 64 | return 0; |
41 | } | 65 | } |
42 | 66 | ||
43 | static int __init smp_spin_table_prepare_cpu(int cpu) | 67 | static int smp_spin_table_cpu_prepare(unsigned int cpu) |
44 | { | 68 | { |
45 | void **release_addr; | 69 | void **release_addr; |
46 | 70 | ||
@@ -48,7 +72,16 @@ static int __init smp_spin_table_prepare_cpu(int cpu) | |||
48 | return -ENODEV; | 72 | return -ENODEV; |
49 | 73 | ||
50 | release_addr = __va(cpu_release_addr[cpu]); | 74 | release_addr = __va(cpu_release_addr[cpu]); |
51 | release_addr[0] = (void *)__pa(secondary_holding_pen); | 75 | |
76 | /* | ||
77 | * We write the release address as LE regardless of the native | ||
78 | * endianness of the kernel. Therefore, any boot-loaders that | ||
79 | * read this address need to convert it to the | ||
80 | * boot-loader's endianness before jumping. This is mandated by | ||
81 | * the boot protocol. | ||
82 | */ | ||
83 | release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen)); | ||
84 | |||
52 | __flush_dcache_area(release_addr, sizeof(release_addr[0])); | 85 | __flush_dcache_area(release_addr, sizeof(release_addr[0])); |
53 | 86 | ||
54 | /* | 87 | /* |
@@ -59,8 +92,60 @@ static int __init smp_spin_table_prepare_cpu(int cpu) | |||
59 | return 0; | 92 | return 0; |
60 | } | 93 | } |
61 | 94 | ||
62 | const struct smp_enable_ops smp_spin_table_ops __initconst = { | 95 | static int smp_spin_table_cpu_boot(unsigned int cpu) |
96 | { | ||
97 | unsigned long timeout; | ||
98 | |||
99 | /* | ||
100 | * Set synchronisation state between this boot processor | ||
101 | * and the secondary one | ||
102 | */ | ||
103 | raw_spin_lock(&boot_lock); | ||
104 | |||
105 | /* | ||
106 | * Update the pen release flag. | ||
107 | */ | ||
108 | write_pen_release(cpu_logical_map(cpu)); | ||
109 | |||
110 | /* | ||
111 | * Send an event, causing the secondaries to read pen_release. | ||
112 | */ | ||
113 | sev(); | ||
114 | |||
115 | timeout = jiffies + (1 * HZ); | ||
116 | while (time_before(jiffies, timeout)) { | ||
117 | if (secondary_holding_pen_release == INVALID_HWID) | ||
118 | break; | ||
119 | udelay(10); | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * Now the secondary core is starting up let it run its | ||
124 | * calibrations, then wait for it to finish | ||
125 | */ | ||
126 | raw_spin_unlock(&boot_lock); | ||
127 | |||
128 | return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0; | ||
129 | } | ||
130 | |||
131 | void smp_spin_table_cpu_postboot(void) | ||
132 | { | ||
133 | /* | ||
134 | * Let the primary processor know we're out of the pen. | ||
135 | */ | ||
136 | write_pen_release(INVALID_HWID); | ||
137 | |||
138 | /* | ||
139 | * Synchronise with the boot thread. | ||
140 | */ | ||
141 | raw_spin_lock(&boot_lock); | ||
142 | raw_spin_unlock(&boot_lock); | ||
143 | } | ||
144 | |||
145 | const struct cpu_operations smp_spin_table_ops = { | ||
63 | .name = "spin-table", | 146 | .name = "spin-table", |
64 | .init_cpu = smp_spin_table_init_cpu, | 147 | .cpu_init = smp_spin_table_cpu_init, |
65 | .prepare_cpu = smp_spin_table_prepare_cpu, | 148 | .cpu_prepare = smp_spin_table_cpu_prepare, |
149 | .cpu_boot = smp_spin_table_cpu_boot, | ||
150 | .cpu_postboot = smp_spin_table_cpu_postboot, | ||
66 | }; | 151 | }; |
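
Because the release address is now stored little-endian unconditionally, a big-endian boot-loader has to byte-swap it before branching. A hypothetical loader-side counterpart (branch_to() and the helper declarations are illustrative only, not kernel API):

    #include <stdint.h>

    extern uint64_t le64_to_cpu(uint64_t v);    /* byte-swap on BE, no-op on LE */
    extern void wfe(void);                      /* wait-for-event hint */
    extern void branch_to(uint64_t pc);         /* jump into the kernel */

    void spin_on_release_addr(volatile uint64_t *release_addr)
    {
            uint64_t entry;

            /* Poll until the kernel publishes secondary_holding_pen's
             * physical address; the kernel's sev() wakes the wfe(). */
            while ((entry = le64_to_cpu(*release_addr)) == 0)
                    wfe();

            branch_to(entry);
    }
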
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S index a1b19ed7467c..423a5b3fc2be 100644 --- a/arch/arm64/kernel/sys32.S +++ b/arch/arm64/kernel/sys32.S | |||
@@ -59,48 +59,48 @@ ENDPROC(compat_sys_fstatfs64_wrapper) | |||
59 | * extension. | 59 | * extension. |
60 | */ | 60 | */ |
61 | compat_sys_pread64_wrapper: | 61 | compat_sys_pread64_wrapper: |
62 | orr x3, x4, x5, lsl #32 | 62 | regs_to_64 x3, x4, x5 |
63 | b sys_pread64 | 63 | b sys_pread64 |
64 | ENDPROC(compat_sys_pread64_wrapper) | 64 | ENDPROC(compat_sys_pread64_wrapper) |
65 | 65 | ||
66 | compat_sys_pwrite64_wrapper: | 66 | compat_sys_pwrite64_wrapper: |
67 | orr x3, x4, x5, lsl #32 | 67 | regs_to_64 x3, x4, x5 |
68 | b sys_pwrite64 | 68 | b sys_pwrite64 |
69 | ENDPROC(compat_sys_pwrite64_wrapper) | 69 | ENDPROC(compat_sys_pwrite64_wrapper) |
70 | 70 | ||
71 | compat_sys_truncate64_wrapper: | 71 | compat_sys_truncate64_wrapper: |
72 | orr x1, x2, x3, lsl #32 | 72 | regs_to_64 x1, x2, x3 |
73 | b sys_truncate | 73 | b sys_truncate |
74 | ENDPROC(compat_sys_truncate64_wrapper) | 74 | ENDPROC(compat_sys_truncate64_wrapper) |
75 | 75 | ||
76 | compat_sys_ftruncate64_wrapper: | 76 | compat_sys_ftruncate64_wrapper: |
77 | orr x1, x2, x3, lsl #32 | 77 | regs_to_64 x1, x2, x3 |
78 | b sys_ftruncate | 78 | b sys_ftruncate |
79 | ENDPROC(compat_sys_ftruncate64_wrapper) | 79 | ENDPROC(compat_sys_ftruncate64_wrapper) |
80 | 80 | ||
81 | compat_sys_readahead_wrapper: | 81 | compat_sys_readahead_wrapper: |
82 | orr x1, x2, x3, lsl #32 | 82 | regs_to_64 x1, x2, x3 |
83 | mov w2, w4 | 83 | mov w2, w4 |
84 | b sys_readahead | 84 | b sys_readahead |
85 | ENDPROC(compat_sys_readahead_wrapper) | 85 | ENDPROC(compat_sys_readahead_wrapper) |
86 | 86 | ||
87 | compat_sys_fadvise64_64_wrapper: | 87 | compat_sys_fadvise64_64_wrapper: |
88 | mov w6, w1 | 88 | mov w6, w1 |
89 | orr x1, x2, x3, lsl #32 | 89 | regs_to_64 x1, x2, x3 |
90 | orr x2, x4, x5, lsl #32 | 90 | regs_to_64 x2, x4, x5 |
91 | mov w3, w6 | 91 | mov w3, w6 |
92 | b sys_fadvise64_64 | 92 | b sys_fadvise64_64 |
93 | ENDPROC(compat_sys_fadvise64_64_wrapper) | 93 | ENDPROC(compat_sys_fadvise64_64_wrapper) |
94 | 94 | ||
95 | compat_sys_sync_file_range2_wrapper: | 95 | compat_sys_sync_file_range2_wrapper: |
96 | orr x2, x2, x3, lsl #32 | 96 | regs_to_64 x2, x2, x3 |
97 | orr x3, x4, x5, lsl #32 | 97 | regs_to_64 x3, x4, x5 |
98 | b sys_sync_file_range2 | 98 | b sys_sync_file_range2 |
99 | ENDPROC(compat_sys_sync_file_range2_wrapper) | 99 | ENDPROC(compat_sys_sync_file_range2_wrapper) |
100 | 100 | ||
101 | compat_sys_fallocate_wrapper: | 101 | compat_sys_fallocate_wrapper: |
102 | orr x2, x2, x3, lsl #32 | 102 | regs_to_64 x2, x2, x3 |
103 | orr x3, x4, x5, lsl #32 | 103 | regs_to_64 x3, x4, x5 |
104 | b sys_fallocate | 104 | b sys_fallocate |
105 | ENDPROC(compat_sys_fallocate_wrapper) | 105 | ENDPROC(compat_sys_fallocate_wrapper) |
106 | 106 | ||
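
regs_to_64, added to asm/assembler.h by this series, replaces the hard-coded orr because the AArch32 ABI passes a 64-bit argument in a register pair whose low/high order flips on big-endian. In C terms the macro computes roughly:

    /* even_reg/odd_reg are the two halves of a 64-bit compat syscall
     * argument as seen in an AArch32 register pair such as r2/r3. */
    static inline u64 regs_to_64(u32 even_reg, u32 odd_reg)
    {
    #ifdef CONFIG_CPU_BIG_ENDIAN
            return ((u64)even_reg << 32) | odd_reg;     /* BE: high word first */
    #else
            return ((u64)odd_reg << 32) | even_reg;     /* LE: low word first */
    #endif
    }
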
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c index 03dc3718eb13..29c39d5d77e3 100644 --- a/arch/arm64/kernel/time.c +++ b/arch/arm64/kernel/time.c | |||
@@ -61,13 +61,6 @@ unsigned long profile_pc(struct pt_regs *regs) | |||
61 | EXPORT_SYMBOL(profile_pc); | 61 | EXPORT_SYMBOL(profile_pc); |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | static u64 sched_clock_mult __read_mostly; | ||
65 | |||
66 | unsigned long long notrace sched_clock(void) | ||
67 | { | ||
68 | return arch_timer_read_counter() * sched_clock_mult; | ||
69 | } | ||
70 | |||
71 | void __init time_init(void) | 64 | void __init time_init(void) |
72 | { | 65 | { |
73 | u32 arch_timer_rate; | 66 | u32 arch_timer_rate; |
@@ -78,9 +71,6 @@ void __init time_init(void) | |||
78 | if (!arch_timer_rate) | 71 | if (!arch_timer_rate) |
79 | panic("Unable to initialise architected timer.\n"); | 72 | panic("Unable to initialise architected timer.\n"); |
80 | 73 | ||
81 | /* Cache the sched_clock multiplier to save a divide in the hot path. */ | ||
82 | sched_clock_mult = NSEC_PER_SEC / arch_timer_rate; | ||
83 | |||
84 | /* Calibrate the delay loop directly */ | 74 | /* Calibrate the delay loop directly */ |
85 | lpj_fine = arch_timer_rate / HZ; | 75 | lpj_fine = arch_timer_rate / HZ; |
86 | } | 76 | } |
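With GENERIC_SCHED_CLOCK selected (see the Kconfig hunk), the open-coded sched_clock() and its cached multiplier become redundant, and for the better: NSEC_PER_SEC / rate truncates for rates that do not divide evenly, and the bare multiply ignores counter wrap, while the generic layer does a shift/multiply conversion and handles wrapping. The clocksource driver registers the counter once instead; a hedged sketch of that registration (the real call site lives in the arch timer driver, names here are illustrative):

    #include <linux/sched_clock.h>

    /*
     * Sketch only: hand the architected counter to the generic
     * sched_clock layer. 56 bits is the architecturally guaranteed
     * minimum counter width.
     */
    static u64 notrace arch_counter_read(void)
    {
            return arch_timer_read_counter();
    }

    static void __init arch_counter_sched_clock_init(u32 rate)
    {
            sched_clock_register(arch_counter_read, 56, rate);
    }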
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 6a389dc1bd49..65d40cf6945a 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c | |||
@@ -58,7 +58,10 @@ static struct page *vectors_page[1]; | |||
58 | static int alloc_vectors_page(void) | 58 | static int alloc_vectors_page(void) |
59 | { | 59 | { |
60 | extern char __kuser_helper_start[], __kuser_helper_end[]; | 60 | extern char __kuser_helper_start[], __kuser_helper_end[]; |
61 | extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[]; | ||
62 | |||
61 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; | 63 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; |
64 | int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start; | ||
62 | unsigned long vpage; | 65 | unsigned long vpage; |
63 | 66 | ||
64 | vpage = get_zeroed_page(GFP_ATOMIC); | 67 | vpage = get_zeroed_page(GFP_ATOMIC); |
@@ -72,7 +75,7 @@ static int alloc_vectors_page(void) | |||
72 | 75 | ||
73 | /* sigreturn code */ | 76 | /* sigreturn code */ |
74 | memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET, | 77 | memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET, |
75 | aarch32_sigret_code, sizeof(aarch32_sigret_code)); | 78 | __aarch32_sigret_code_start, sigret_sz); |
76 | 79 | ||
77 | flush_icache_range(vpage, vpage + PAGE_SIZE); | 80 | flush_icache_range(vpage, vpage + PAGE_SIZE); |
78 | vectors_page[0] = virt_to_page(vpage); | 81 | vectors_page[0] = virt_to_page(vpage); |
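The compat sigreturn trampoline was previously copied from a fixed-size C array; bracketing it with linker-visible start/end symbols in the assembly source means the copy length in alloc_vectors_page() can never go stale when instructions are added or removed. The pattern, restated in C (symbol names as in the diff):

    /*
     * Start/end symbols emitted around the trampoline in the .S file
     * delimit exactly the bytes to copy into the vectors page.
     */
    extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];

    static inline size_t sigret_code_size(void)
    {
            return __aarch32_sigret_code_end - __aarch32_sigret_code_start;
    }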
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index f8ab9d8e2ea3..5161ad992091 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S | |||
@@ -54,7 +54,6 @@ SECTIONS | |||
54 | } | 54 | } |
55 | .text : { /* Real text segment */ | 55 | .text : { /* Real text segment */ |
56 | _stext = .; /* Text and read-only data */ | 56 | _stext = .; /* Text and read-only data */ |
57 | *(.smp.pen.text) | ||
58 | __exception_text_start = .; | 57 | __exception_text_start = .; |
59 | *(.exception.text) | 58 | *(.exception.text) |
60 | __exception_text_end = .; | 59 | __exception_text_end = .; |
@@ -97,30 +96,13 @@ SECTIONS | |||
97 | PERCPU_SECTION(64) | 96 | PERCPU_SECTION(64) |
98 | 97 | ||
99 | __init_end = .; | 98 | __init_end = .; |
100 | . = ALIGN(THREAD_SIZE); | 99 | |
101 | __data_loc = .; | 100 | . = ALIGN(PAGE_SIZE); |
102 | 101 | _data = .; | |
103 | .data : AT(__data_loc) { | 102 | __data_loc = _data - LOAD_OFFSET; |
104 | _data = .; /* address in memory */ | 103 | _sdata = .; |
105 | _sdata = .; | 104 | RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) |
106 | 105 | _edata = .; | |
107 | /* | ||
108 | * first, the init task union, aligned | ||
109 | * to an 8192 byte boundary. | ||
110 | */ | ||
111 | INIT_TASK_DATA(THREAD_SIZE) | ||
112 | NOSAVE_DATA | ||
113 | CACHELINE_ALIGNED_DATA(64) | ||
114 | READ_MOSTLY_DATA(64) | ||
115 | |||
116 | /* | ||
117 | * and the usual data section | ||
118 | */ | ||
119 | DATA_DATA | ||
120 | CONSTRUCTORS | ||
121 | |||
122 | _edata = .; | ||
123 | } | ||
124 | _edata_loc = __data_loc + SIZEOF(.data); | 106 | _edata_loc = __data_loc + SIZEOF(.data); |
125 | 107 | ||
126 | BSS_SECTION(0, 0, 0) | 108 | BSS_SECTION(0, 0, 0) |
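The hand-rolled .data output section is replaced by the generic RW_DATA_SECTION helper, which emits the init task, nosave, cacheline-aligned, read-mostly and ordinary data in the conventional order; the explicit AT() goes away in favour of the __data_loc/LOAD_OFFSET computation above it. For reference, the generic macro in include/asm-generic/vmlinux.lds.h of this era reads approximately:

    #define RW_DATA_SECTION(cacheline, pagealigned, inittask)       \
            . = ALIGN(PAGE_SIZE);                                   \
            .data : AT(ADDR(.data) - LOAD_OFFSET) {                 \
                    INIT_TASK_DATA(inittask)                        \
                    NOSAVE_DATA                                     \
                    PAGE_ALIGNED_DATA(pagealigned)                  \
                    CACHELINE_ALIGNED_DATA(cacheline)               \
                    READ_MOSTLY_DATA(cacheline)                     \
                    DATA_DATA                                       \
                    CONSTRUCTORS                                    \
            }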
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index ba84e6705e20..2b0244d65c16 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S | |||
@@ -74,7 +74,10 @@ __do_hyp_init: | |||
74 | msr mair_el2, x4 | 74 | msr mair_el2, x4 |
75 | isb | 75 | isb |
76 | 76 | ||
77 | mov x4, #SCTLR_EL2_FLAGS | 77 | mrs x4, sctlr_el2 |
78 | and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2 | ||
79 | ldr x5, =SCTLR_EL2_FLAGS | ||
80 | orr x4, x4, x5 | ||
78 | msr sctlr_el2, x4 | 81 | msr sctlr_el2, x4 |
79 | isb | 82 | isb |
80 | 83 | ||
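Rather than writing SCTLR_EL2 from a bare immediate, hyp init now reads the live value and keeps the EE bit (bit 25), so a big-endian kernel retains big-endian data accesses at EL2; the flags come from a literal pool load instead of a mov immediate. Sketched in C (mask per the ARMv8 ARM, helper name illustrative):

    /* SCTLR_ELx.EE, bit 25: endianness of data accesses at that EL */
    #define SCTLR_ELx_EE    (1UL << 25)

    static inline unsigned long sctlr_el2_init_val(unsigned long cur,
                                                   unsigned long flags)
    {
            /* preserve only the endianness choice, then set the flags */
            return (cur & SCTLR_ELx_EE) | flags;
    }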
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 1ac0bbbdddb2..3b47c36e10ff 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S | |||
@@ -403,6 +403,14 @@ __kvm_hyp_code_start: | |||
403 | ldr w9, [x2, #GICH_ELRSR0] | 403 | ldr w9, [x2, #GICH_ELRSR0] |
404 | ldr w10, [x2, #GICH_ELRSR1] | 404 | ldr w10, [x2, #GICH_ELRSR1] |
405 | ldr w11, [x2, #GICH_APR] | 405 | ldr w11, [x2, #GICH_APR] |
406 | CPU_BE( rev w4, w4 ) | ||
407 | CPU_BE( rev w5, w5 ) | ||
408 | CPU_BE( rev w6, w6 ) | ||
409 | CPU_BE( rev w7, w7 ) | ||
410 | CPU_BE( rev w8, w8 ) | ||
411 | CPU_BE( rev w9, w9 ) | ||
412 | CPU_BE( rev w10, w10 ) | ||
413 | CPU_BE( rev w11, w11 ) | ||
406 | 414 | ||
407 | str w4, [x3, #VGIC_CPU_HCR] | 415 | str w4, [x3, #VGIC_CPU_HCR] |
408 | str w5, [x3, #VGIC_CPU_VMCR] | 416 | str w5, [x3, #VGIC_CPU_VMCR] |
@@ -421,6 +429,7 @@ __kvm_hyp_code_start: | |||
421 | ldr w4, [x3, #VGIC_CPU_NR_LR] | 429 | ldr w4, [x3, #VGIC_CPU_NR_LR] |
422 | add x3, x3, #VGIC_CPU_LR | 430 | add x3, x3, #VGIC_CPU_LR |
423 | 1: ldr w5, [x2], #4 | 431 | 1: ldr w5, [x2], #4 |
432 | CPU_BE( rev w5, w5 ) | ||
424 | str w5, [x3], #4 | 433 | str w5, [x3], #4 |
425 | sub w4, w4, #1 | 434 | sub w4, w4, #1 |
426 | cbnz w4, 1b | 435 | cbnz w4, 1b |
@@ -446,6 +455,9 @@ __kvm_hyp_code_start: | |||
446 | ldr w4, [x3, #VGIC_CPU_HCR] | 455 | ldr w4, [x3, #VGIC_CPU_HCR] |
447 | ldr w5, [x3, #VGIC_CPU_VMCR] | 456 | ldr w5, [x3, #VGIC_CPU_VMCR] |
448 | ldr w6, [x3, #VGIC_CPU_APR] | 457 | ldr w6, [x3, #VGIC_CPU_APR] |
458 | CPU_BE( rev w4, w4 ) | ||
459 | CPU_BE( rev w5, w5 ) | ||
460 | CPU_BE( rev w6, w6 ) | ||
449 | 461 | ||
450 | str w4, [x2, #GICH_HCR] | 462 | str w4, [x2, #GICH_HCR] |
451 | str w5, [x2, #GICH_VMCR] | 463 | str w5, [x2, #GICH_VMCR] |
@@ -456,6 +468,7 @@ __kvm_hyp_code_start: | |||
456 | ldr w4, [x3, #VGIC_CPU_NR_LR] | 468 | ldr w4, [x3, #VGIC_CPU_NR_LR] |
457 | add x3, x3, #VGIC_CPU_LR | 469 | add x3, x3, #VGIC_CPU_LR |
458 | 1: ldr w5, [x3], #4 | 470 | 1: ldr w5, [x3], #4 |
471 | CPU_BE( rev w5, w5 ) | ||
459 | str w5, [x2], #4 | 472 | str w5, [x2], #4 |
460 | sub w4, w4, #1 | 473 | sub w4, w4, #1 |
461 | cbnz w4, 1b | 474 | cbnz w4, 1b |
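The GIC virtual interface (GICH_*) registers are little-endian MMIO, so on a big-endian kernel every 32-bit value shuttled between the hardware and the in-memory vgic state needs a byte swap; CPU_BE() assembles its payload only when CONFIG_CPU_BIG_ENDIAN is set, so the little-endian path pays nothing. Its definition is essentially:

    /*
     * Emit the wrapped instruction(s) only for the matching kernel
     * endianness (cf. arch/arm64/include/asm/assembler.h).
     */
    #ifdef CONFIG_CPU_BIG_ENDIAN
    #define CPU_BE(code...) code
    #define CPU_LE(code...)
    #else
    #define CPU_BE(code...)
    #define CPU_LE(code...) code
    #endif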
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index de2de5db628d..0cb8742de4f2 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/sort.h> | 31 | #include <linux/sort.h> |
32 | #include <linux/of_fdt.h> | 32 | #include <linux/of_fdt.h> |
33 | 33 | ||
34 | #include <asm/prom.h> | ||
35 | #include <asm/sections.h> | 34 | #include <asm/sections.h> |
36 | #include <asm/setup.h> | 35 | #include <asm/setup.h> |
37 | #include <asm/sizes.h> | 36 | #include <asm/sizes.h> |
@@ -39,17 +38,9 @@ | |||
39 | 38 | ||
40 | #include "mm.h" | 39 | #include "mm.h" |
41 | 40 | ||
42 | static unsigned long phys_initrd_start __initdata = 0; | ||
43 | static unsigned long phys_initrd_size __initdata = 0; | ||
44 | |||
45 | phys_addr_t memstart_addr __read_mostly = 0; | 41 | phys_addr_t memstart_addr __read_mostly = 0; |
46 | 42 | ||
47 | void __init early_init_dt_setup_initrd_arch(u64 start, u64 end) | 43 | #ifdef CONFIG_BLK_DEV_INITRD |
48 | { | ||
49 | phys_initrd_start = start; | ||
50 | phys_initrd_size = end - start; | ||
51 | } | ||
52 | |||
53 | static int __init early_initrd(char *p) | 44 | static int __init early_initrd(char *p) |
54 | { | 45 | { |
55 | unsigned long start, size; | 46 | unsigned long start, size; |
@@ -59,12 +50,13 @@ static int __init early_initrd(char *p) | |||
59 | if (*endp == ',') { | 50 | if (*endp == ',') { |
60 | size = memparse(endp + 1, NULL); | 51 | size = memparse(endp + 1, NULL); |
61 | 52 | ||
62 | phys_initrd_start = start; | 53 | initrd_start = (unsigned long)__va(start); |
63 | phys_initrd_size = size; | 54 | initrd_end = (unsigned long)__va(start + size); |
64 | } | 55 | } |
65 | return 0; | 56 | return 0; |
66 | } | 57 | } |
67 | early_param("initrd", early_initrd); | 58 | early_param("initrd", early_initrd); |
59 | #endif | ||
68 | 60 | ||
69 | #define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) | 61 | #define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT) |
70 | 62 | ||
@@ -137,13 +129,8 @@ void __init arm64_memblock_init(void) | |||
137 | /* Register the kernel text, kernel data and initrd with memblock */ | 129 | /* Register the kernel text, kernel data and initrd with memblock */ |
138 | memblock_reserve(__pa(_text), _end - _text); | 130 | memblock_reserve(__pa(_text), _end - _text); |
139 | #ifdef CONFIG_BLK_DEV_INITRD | 131 | #ifdef CONFIG_BLK_DEV_INITRD |
140 | if (phys_initrd_size) { | 132 | if (initrd_start) |
141 | memblock_reserve(phys_initrd_start, phys_initrd_size); | 133 | memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start); |
142 | |||
143 | /* Now convert initrd to virtual addresses */ | ||
144 | initrd_start = __phys_to_virt(phys_initrd_start); | ||
145 | initrd_end = initrd_start + phys_initrd_size; | ||
146 | } | ||
147 | #endif | 134 | #endif |
148 | 135 | ||
149 | /* | 136 | /* |
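The arch-private phys_initrd_* variables and the early_init_dt_setup_initrd_arch() hook are dropped: both the generic FDT code and the initrd= early parameter now populate initrd_start/initrd_end directly as virtual addresses, leaving arm64_memblock_init() to reserve the range via a single __virt_to_phys(). For illustration, memparse() accepts the usual size suffixes, so a command line such as initrd=0x84000000,16M (values hypothetical) parses as:

    /* sketch of the "initrd=start,size" parsing done above */
    static int __init initrd_parse_example(void)
    {
            char arg[] = "0x84000000,16M";  /* hypothetical argument */
            char *endp;
            unsigned long start = memparse(arg, &endp);
            unsigned long size  = (*endp == ',') ?
                                  memparse(endp + 1, NULL) : 0;

            /* start == 0x84000000, size == 16 << 20 */
            return (start && size) ? 0 : -EINVAL;
    }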
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c index 1725cd6db37a..2bb1d586664c 100644 --- a/arch/arm64/mm/ioremap.c +++ b/arch/arm64/mm/ioremap.c | |||
@@ -77,8 +77,24 @@ EXPORT_SYMBOL(__ioremap); | |||
77 | 77 | ||
78 | void __iounmap(volatile void __iomem *io_addr) | 78 | void __iounmap(volatile void __iomem *io_addr) |
79 | { | 79 | { |
80 | void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); | 80 | unsigned long addr = (unsigned long)io_addr & PAGE_MASK; |
81 | 81 | ||
82 | vunmap(addr); | 82 | /* |
83 | * We could get an address outside vmalloc range in case | ||
84 | * of ioremap_cache() reusing a RAM mapping. | ||
85 | */ | ||
86 | if (VMALLOC_START <= addr && addr < VMALLOC_END) | ||
87 | vunmap((void *)addr); | ||
83 | } | 88 | } |
84 | EXPORT_SYMBOL(__iounmap); | 89 | EXPORT_SYMBOL(__iounmap); |
90 | |||
91 | void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size) | ||
92 | { | ||
93 | /* For normal memory we already have a cacheable mapping. */ | ||
94 | if (pfn_valid(__phys_to_pfn(phys_addr))) | ||
95 | return (void __iomem *)__phys_to_virt(phys_addr); | ||
96 | |||
97 | return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL), | ||
98 | __builtin_return_address(0)); | ||
99 | } | ||
100 | EXPORT_SYMBOL(ioremap_cache); | ||
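ioremap_cache() reuses the kernel's existing cacheable linear mapping whenever the target is RAM, instead of creating a second mapping whose attributes could mismatch the first; the flip side is that __iounmap() must now tolerate addresses outside the vmalloc area and only vunmap() genuine vmalloc mappings. Typical usage, sketched (function name, physical address and length are placeholders):

    static int __init map_fw_table(phys_addr_t pa, size_t len)
    {
            void __iomem *tbl = ioremap_cache(pa, len);

            if (!tbl)
                    return -ENOMEM;
            /* ... parse the firmware table through 'tbl' ... */
            iounmap(tbl);
            return 0;
    }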
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index b1b31bbc967b..421b99fd635d 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S | |||
@@ -162,9 +162,9 @@ ENDPROC(__cpu_setup) | |||
162 | * CE0 XWHW CZ ME TEEA S | 162 | * CE0 XWHW CZ ME TEEA S |
163 | * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM | 163 | * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM |
164 | * 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved | 164 | * 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved |
165 | * .... .100 .... 01.1 11.1 ..01 0001 1101 < software settings | 165 | * .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings |
166 | */ | 166 | */ |
167 | .type crval, #object | 167 | .type crval, #object |
168 | crval: | 168 | crval: |
169 | .word 0x030802e2 // clear | 169 | .word 0x000802e2 // clear |
170 | .word 0x0405d11d // set | 170 | .word 0x0405d11d // set |
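The clear mask loses 0x03000000, i.e. bits 25:24 (SCTLR_EL1.EE and .E0E), so the endianness configured earlier in boot survives __cpu_setup; the bitmap comment changes from .100 to .1.. to match. The crval pair is applied clear-first, then set, roughly (macro names illustrative, values as in the diff):

    #define CRVAL_CLEAR     0x000802e2UL    /* was 0x030802e2 */
    #define CRVAL_SET       0x0405d11dUL

    static inline unsigned long apply_crval(unsigned long sctlr_el1)
    {
            return (sctlr_el1 & ~CRVAL_CLEAR) | CRVAL_SET;
    }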