Diffstat:
-rw-r--r--  arch/arm/kvm/arm.c                  |  30
-rw-r--r--  arch/arm64/Kconfig                  |  21
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h    |   6
-rw-r--r--  arch/arm64/include/asm/cputype.h    |  10
-rw-r--r--  arch/arm64/include/asm/hardirq.h    |   2
-rw-r--r--  arch/arm64/include/asm/proc-fns.h   |   3
-rw-r--r--  arch/arm64/include/asm/smp_plat.h   |  13
-rw-r--r--  arch/arm64/include/asm/suspend.h    |  27
-rw-r--r--  arch/arm64/kernel/Makefile          |   1
-rw-r--r--  arch/arm64/kernel/asm-offsets.c     |  11
-rw-r--r--  arch/arm64/kernel/fpsimd.c          |  36
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c   | 221
-rw-r--r--  arch/arm64/kernel/process.c         |   7
-rw-r--r--  arch/arm64/kernel/setup.c           |  70
-rw-r--r--  arch/arm64/kernel/sleep.S           | 184
-rw-r--r--  arch/arm64/kernel/smp.c             |  17
-rw-r--r--  arch/arm64/kernel/suspend.c         | 109
-rw-r--r--  arch/arm64/mm/proc.S                |  69
18 files changed, 767 insertions(+), 70 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2a700e00528d..b18165ca1d38 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/cpu_pm.h>
20#include <linux/errno.h> 21#include <linux/errno.h>
21#include <linux/err.h> 22#include <linux/err.h>
22#include <linux/kvm_host.h> 23#include <linux/kvm_host.h>
@@ -853,6 +854,33 @@ static struct notifier_block hyp_init_cpu_nb = {
853 .notifier_call = hyp_init_cpu_notify, 854 .notifier_call = hyp_init_cpu_notify,
854}; 855};
855 856
857#ifdef CONFIG_CPU_PM
858static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
859 unsigned long cmd,
860 void *v)
861{
862 if (cmd == CPU_PM_EXIT) {
863 cpu_init_hyp_mode(NULL);
864 return NOTIFY_OK;
865 }
866
867 return NOTIFY_DONE;
868}
869
870static struct notifier_block hyp_init_cpu_pm_nb = {
871 .notifier_call = hyp_init_cpu_pm_notifier,
872};
873
874static void __init hyp_cpu_pm_init(void)
875{
876 cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
877}
878#else
879static inline void hyp_cpu_pm_init(void)
880{
881}
882#endif
883
856/** 884/**
857 * Inits Hyp-mode on all online CPUs 885 * Inits Hyp-mode on all online CPUs
858 */ 886 */
@@ -1013,6 +1041,8 @@ int kvm_arch_init(void *opaque)
1013 goto out_err; 1041 goto out_err;
1014 } 1042 }
1015 1043
1044 hyp_cpu_pm_init();
1045
1016 kvm_coproc_table_init(); 1046 kvm_coproc_table_init();
1017 return 0; 1047 return 0;
1018out_err: 1048out_err:
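The new notifier re-initialises Hyp mode on CPU_PM_EXIT because EL2 register state is lost when a core is powered down. For context, a minimal sketch of the producer side is below, assuming a hypothetical platform_enter_lowpower() firmware call: a low-power path brackets the context-losing state with cpu_pm_enter()/cpu_pm_exit(), and it is the cpu_pm_exit() call that delivers CPU_PM_EXIT to notifiers such as hyp_init_cpu_pm_nb above.

```c
/*
 * Sketch only: how a platform idle/suspend path would trigger the notifier
 * above. platform_enter_lowpower() is a made-up stand-in for the real
 * firmware/power-controller call; everything else is the generic CPU PM API.
 */
#include <linux/cpu_pm.h>

extern int platform_enter_lowpower(void);	/* hypothetical */

static int platform_idle_deep(void)
{
	int ret;

	ret = cpu_pm_enter();	/* CPU_PM_ENTER: notifiers save per-CPU context */
	if (ret)
		return ret;	/* a notifier vetoed the low-power entry */

	ret = platform_enter_lowpower();	/* CPU context (incl. EL2) lost here */

	cpu_pm_exit();	/* CPU_PM_EXIT: KVM re-inits Hyp mode, FPSIMD/breakpoints restored */
	return ret;
}
```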
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 101683d336c9..249acb9da4e3 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2,6 +2,7 @@ config ARM64
2 def_bool y 2 def_bool y
3 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 3 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
4 select ARCH_USE_CMPXCHG_LOCKREF 4 select ARCH_USE_CMPXCHG_LOCKREF
5 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
5 select ARCH_WANT_OPTIONAL_GPIOLIB 6 select ARCH_WANT_OPTIONAL_GPIOLIB
6 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION 7 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
7 select ARCH_WANT_FRAME_POINTERS 8 select ARCH_WANT_FRAME_POINTERS
@@ -11,8 +12,10 @@ config ARM64
11 select BUILDTIME_EXTABLE_SORT 12 select BUILDTIME_EXTABLE_SORT
12 select CLONE_BACKWARDS 13 select CLONE_BACKWARDS
13 select COMMON_CLK 14 select COMMON_CLK
15 select CPU_PM if (SUSPEND || CPU_IDLE)
14 select DCACHE_WORD_ACCESS 16 select DCACHE_WORD_ACCESS
15 select GENERIC_CLOCKEVENTS 17 select GENERIC_CLOCKEVENTS
18 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
16 select GENERIC_IOMAP 19 select GENERIC_IOMAP
17 select GENERIC_IRQ_PROBE 20 select GENERIC_IRQ_PROBE
18 select GENERIC_IRQ_SHOW 21 select GENERIC_IRQ_SHOW
@@ -280,6 +283,24 @@ config SYSVIPC_COMPAT
280 283
281endmenu 284endmenu
282 285
286menu "Power management options"
287
288source "kernel/power/Kconfig"
289
290config ARCH_SUSPEND_POSSIBLE
291 def_bool y
292
293config ARM64_CPU_SUSPEND
294 def_bool PM_SLEEP
295
296endmenu
297
298menu "CPU Power Management"
299
300source "drivers/cpuidle/Kconfig"
301
302endmenu
303
283source "net/Kconfig" 304source "net/Kconfig"
284 305
285source "drivers/Kconfig" 306source "drivers/Kconfig"
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index c4cdb5e5b73d..152413076503 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -39,6 +39,9 @@ struct device_node;
39 * from the cpu to be killed. 39 * from the cpu to be killed.
40 * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the 40 * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
41 * cpu being killed. 41 * cpu being killed.
42 * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
43 * to wrong parameters or error conditions. Called from the
44 * CPU being suspended. Must be called with IRQs disabled.
42 */ 45 */
43struct cpu_operations { 46struct cpu_operations {
44 const char *name; 47 const char *name;
@@ -50,6 +53,9 @@ struct cpu_operations {
50 int (*cpu_disable)(unsigned int cpu); 53 int (*cpu_disable)(unsigned int cpu);
51 void (*cpu_die)(unsigned int cpu); 54 void (*cpu_die)(unsigned int cpu);
52#endif 55#endif
56#ifdef CONFIG_ARM64_CPU_SUSPEND
57 int (*cpu_suspend)(unsigned long);
58#endif
53}; 59};
54 60
55extern const struct cpu_operations *cpu_ops[NR_CPUS]; 61extern const struct cpu_operations *cpu_ops[NR_CPUS];
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index e1af1b4200d5..c404fb0df3a6 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -20,6 +20,16 @@
20 20
21#define MPIDR_HWID_BITMASK 0xff00ffffff 21#define MPIDR_HWID_BITMASK 0xff00ffffff
22 22
23#define MPIDR_LEVEL_BITS_SHIFT 3
24#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
25#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
26
27#define MPIDR_LEVEL_SHIFT(level) \
28 (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
29
30#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
31 ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
32
23#define read_cpuid(reg) ({ \ 33#define read_cpuid(reg) ({ \
24 u64 __val; \ 34 u64 __val; \
25 asm("mrs %0, " #reg : "=r" (__val)); \ 35 asm("mrs %0, " #reg : "=r" (__val)); \
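The macros added above decompose an MPIDR_EL1 value into its affinity fields: MPIDR_LEVEL_SHIFT(level) evaluates to 0, 8, 16 and 32 for levels 0 to 3, so Aff3 is taken from bits [39:32]. A small stand-alone worked example (plain C, not kernel code) of the same arithmetic, using an arbitrary sample MPIDR:

```c
/* Worked example of the affinity-field extraction performed by the macros
 * above; the definitions match the kernel ones, the sample MPIDR is arbitrary. */
#include <stdio.h>
#include <stdint.h>

#define MPIDR_LEVEL_BITS_SHIFT	3
#define MPIDR_LEVEL_BITS	(1 << MPIDR_LEVEL_BITS_SHIFT)
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_LEVEL_SHIFT(level) \
	(((1 << (level)) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	(((mpidr) >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)

int main(void)
{
	uint64_t mpidr = 0x0000000100020301ULL;	/* Aff3=1, Aff2=2, Aff1=3, Aff0=1 */
	int level;

	for (level = 0; level < 4; level++)
		printf("Aff%d = %llu\n", level,
		       (unsigned long long)MPIDR_AFFINITY_LEVEL(mpidr, level));
	return 0;
}
```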
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 990c051e7829..ae4801d77514 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
20#include <linux/threads.h> 20#include <linux/threads.h>
21#include <asm/irq.h> 21#include <asm/irq.h>
22 22
23#define NR_IPI 4 23#define NR_IPI 5
24 24
25typedef struct { 25typedef struct {
26 unsigned int __softirq_pending; 26 unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 7cdf466fd0c5..0c657bb54597 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -26,11 +26,14 @@
26#include <asm/page.h> 26#include <asm/page.h>
27 27
28struct mm_struct; 28struct mm_struct;
29struct cpu_suspend_ctx;
29 30
30extern void cpu_cache_off(void); 31extern void cpu_cache_off(void);
31extern void cpu_do_idle(void); 32extern void cpu_do_idle(void);
32extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); 33extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
33extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); 34extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
35extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
36extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
34 37
35#include <asm/memory.h> 38#include <asm/memory.h>
36 39
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index ed43a0d2b1b2..59e282311b58 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -21,6 +21,19 @@
21 21
22#include <asm/types.h> 22#include <asm/types.h>
23 23
24struct mpidr_hash {
25 u64 mask;
26 u32 shift_aff[4];
27 u32 bits;
28};
29
30extern struct mpidr_hash mpidr_hash;
31
32static inline u32 mpidr_hash_size(void)
33{
34 return 1 << mpidr_hash.bits;
35}
36
24/* 37/*
25 * Logical CPU mapping. 38 * Logical CPU mapping.
26 */ 39 */
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
new file mode 100644
index 000000000000..e9c149c042e0
--- /dev/null
+++ b/arch/arm64/include/asm/suspend.h
@@ -0,0 +1,27 @@
1#ifndef __ASM_SUSPEND_H
2#define __ASM_SUSPEND_H
3
4#define NR_CTX_REGS 11
5
6/*
7 * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
8 * the stack, which must be 16-byte aligned on v8
9 */
10struct cpu_suspend_ctx {
11 /*
12 * This struct must be kept in sync with
13 * cpu_do_{suspend/resume} in mm/proc.S
14 */
15 u64 ctx_regs[NR_CTX_REGS];
16 u64 sp;
17} __aligned(16);
18
19struct sleep_save_sp {
20 phys_addr_t *save_ptr_stash;
21 phys_addr_t save_ptr_stash_phys;
22};
23
24extern void cpu_resume(void);
25extern int cpu_suspend(unsigned long);
26
27#endif
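NR_CTX_REGS above has to stay in step with the eleven system registers saved by cpu_do_suspend() in mm/proc.S (tpidr_el0 through sctlr_el1, see the last hunk of this diff), and the structure must preserve the 16-byte stack alignment AArch64 requires. A hedged sketch of a compile-time check one could add — the helper name and placement are illustrative, not part of the patch:

```c
/*
 * Illustrative only: compile-time checks mirroring the comments above.
 * cpu_do_suspend() stores NR_CTX_REGS (11) system registers followed by
 * the saved stack pointer, and the context sits on a 16-byte aligned stack.
 */
#include <linux/bug.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <asm/suspend.h>

static void __maybe_unused cpu_suspend_ctx_checks(void)
{
	BUILD_BUG_ON(offsetof(struct cpu_suspend_ctx, sp) != NR_CTX_REGS * sizeof(u64));
	BUILD_BUG_ON(sizeof(struct cpu_suspend_ctx) % 16);
}
```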
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 5ba2fd43a75b..1cd339d5037b 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -18,6 +18,7 @@ arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o
18arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o 18arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
19arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o 19arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
20arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 20arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
21arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
21 22
22obj-y += $(arm64-obj-y) vdso/ 23obj-y += $(arm64-obj-y) vdso/
23obj-m += $(arm64-obj-m) 24obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 666e231d410b..646f888387cd 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -25,6 +25,8 @@
25#include <asm/thread_info.h> 25#include <asm/thread_info.h>
26#include <asm/memory.h> 26#include <asm/memory.h>
27#include <asm/cputable.h> 27#include <asm/cputable.h>
28#include <asm/smp_plat.h>
29#include <asm/suspend.h>
28#include <asm/vdso_datapage.h> 30#include <asm/vdso_datapage.h>
29#include <linux/kbuild.h> 31#include <linux/kbuild.h>
30 32
@@ -138,5 +140,14 @@ int main(void)
138 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); 140 DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
139 DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); 141 DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
140#endif 142#endif
143#ifdef CONFIG_ARM64_CPU_SUSPEND
144 DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
145 DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
146 DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
147 DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff));
148 DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp));
149 DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
150 DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
151#endif
141 return 0; 152 return 0;
142} 153}
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index bb785d23dbde..4aef42a04bdc 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,6 +17,7 @@
17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */ 18 */
19 19
20#include <linux/cpu_pm.h>
20#include <linux/kernel.h> 21#include <linux/kernel.h>
21#include <linux/init.h> 22#include <linux/init.h>
22#include <linux/sched.h> 23#include <linux/sched.h>
@@ -113,6 +114,39 @@ EXPORT_SYMBOL(kernel_neon_end);
113 114
114#endif /* CONFIG_KERNEL_MODE_NEON */ 115#endif /* CONFIG_KERNEL_MODE_NEON */
115 116
117#ifdef CONFIG_CPU_PM
118static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
119 unsigned long cmd, void *v)
120{
121 switch (cmd) {
122 case CPU_PM_ENTER:
123 if (current->mm)
124 fpsimd_save_state(&current->thread.fpsimd_state);
125 break;
126 case CPU_PM_EXIT:
127 if (current->mm)
128 fpsimd_load_state(&current->thread.fpsimd_state);
129 break;
130 case CPU_PM_ENTER_FAILED:
131 default:
132 return NOTIFY_DONE;
133 }
134 return NOTIFY_OK;
135}
136
137static struct notifier_block fpsimd_cpu_pm_notifier_block = {
138 .notifier_call = fpsimd_cpu_pm_notifier,
139};
140
141static void fpsimd_pm_init(void)
142{
143 cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
144}
145
146#else
147static inline void fpsimd_pm_init(void) { }
148#endif /* CONFIG_CPU_PM */
149
116/* 150/*
117 * FP/SIMD support code initialisation. 151 * FP/SIMD support code initialisation.
118 */ 152 */
@@ -131,6 +165,8 @@ static int __init fpsimd_init(void)
131 else 165 else
132 elf_hwcap |= HWCAP_ASIMD; 166 elf_hwcap |= HWCAP_ASIMD;
133 167
168 fpsimd_pm_init();
169
134 return 0; 170 return 0;
135} 171}
136late_initcall(fpsimd_init); 172late_initcall(fpsimd_init);
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index ff516f6691e4..bcaaac9e14d6 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -20,6 +20,7 @@
20 20
21#define pr_fmt(fmt) "hw-breakpoint: " fmt 21#define pr_fmt(fmt) "hw-breakpoint: " fmt
22 22
23#include <linux/cpu_pm.h>
23#include <linux/errno.h> 24#include <linux/errno.h>
24#include <linux/hw_breakpoint.h> 25#include <linux/hw_breakpoint.h>
25#include <linux/perf_event.h> 26#include <linux/perf_event.h>
@@ -169,15 +170,68 @@ static enum debug_el debug_exception_level(int privilege)
169 } 170 }
170} 171}
171 172
172/* 173enum hw_breakpoint_ops {
173 * Install a perf counter breakpoint. 174 HW_BREAKPOINT_INSTALL,
175 HW_BREAKPOINT_UNINSTALL,
176 HW_BREAKPOINT_RESTORE
177};
178
179/**
180 * hw_breakpoint_slot_setup - Find and setup a perf slot according to
181 * operations
182 *
183 * @slots: pointer to array of slots
184 * @max_slots: max number of slots
185 * @bp: perf_event to setup
186 * @ops: operation to be carried out on the slot
187 *
188 * Return:
189 * slot index on success
190 * -ENOSPC if no slot is available/matches
191 * -EINVAL on wrong operations parameter
174 */ 192 */
175int arch_install_hw_breakpoint(struct perf_event *bp) 193static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
194 struct perf_event *bp,
195 enum hw_breakpoint_ops ops)
196{
197 int i;
198 struct perf_event **slot;
199
200 for (i = 0; i < max_slots; ++i) {
201 slot = &slots[i];
202 switch (ops) {
203 case HW_BREAKPOINT_INSTALL:
204 if (!*slot) {
205 *slot = bp;
206 return i;
207 }
208 break;
209 case HW_BREAKPOINT_UNINSTALL:
210 if (*slot == bp) {
211 *slot = NULL;
212 return i;
213 }
214 break;
215 case HW_BREAKPOINT_RESTORE:
216 if (*slot == bp)
217 return i;
218 break;
219 default:
220 pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
221 return -EINVAL;
222 }
223 }
224 return -ENOSPC;
225}
226
227static int hw_breakpoint_control(struct perf_event *bp,
228 enum hw_breakpoint_ops ops)
176{ 229{
177 struct arch_hw_breakpoint *info = counter_arch_bp(bp); 230 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
178 struct perf_event **slot, **slots; 231 struct perf_event **slots;
179 struct debug_info *debug_info = &current->thread.debug; 232 struct debug_info *debug_info = &current->thread.debug;
180 int i, max_slots, ctrl_reg, val_reg, reg_enable; 233 int i, max_slots, ctrl_reg, val_reg, reg_enable;
234 enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege);
181 u32 ctrl; 235 u32 ctrl;
182 236
183 if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { 237 if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
@@ -196,67 +250,54 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
196 reg_enable = !debug_info->wps_disabled; 250 reg_enable = !debug_info->wps_disabled;
197 } 251 }
198 252
199 for (i = 0; i < max_slots; ++i) { 253 i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);
200 slot = &slots[i];
201 254
202 if (!*slot) { 255 if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
203 *slot = bp; 256 return i;
204 break;
205 }
206 }
207 257
208 if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) 258 switch (ops) {
209 return -ENOSPC; 259 case HW_BREAKPOINT_INSTALL:
210 260 /*
211 /* Ensure debug monitors are enabled at the correct exception level. */ 261 * Ensure debug monitors are enabled at the correct exception
212 enable_debug_monitors(debug_exception_level(info->ctrl.privilege)); 262 * level.
213 263 */
214 /* Setup the address register. */ 264 enable_debug_monitors(dbg_el);
215 write_wb_reg(val_reg, i, info->address); 265 /* Fall through */
266 case HW_BREAKPOINT_RESTORE:
267 /* Setup the address register. */
268 write_wb_reg(val_reg, i, info->address);
269
270 /* Setup the control register. */
271 ctrl = encode_ctrl_reg(info->ctrl);
272 write_wb_reg(ctrl_reg, i,
273 reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
274 break;
275 case HW_BREAKPOINT_UNINSTALL:
276 /* Reset the control register. */
277 write_wb_reg(ctrl_reg, i, 0);
216 278
217 /* Setup the control register. */ 279 /*
218 ctrl = encode_ctrl_reg(info->ctrl); 280 * Release the debug monitors for the correct exception
219 write_wb_reg(ctrl_reg, i, reg_enable ? ctrl | 0x1 : ctrl & ~0x1); 281 * level.
282 */
283 disable_debug_monitors(dbg_el);
284 break;
285 }
220 286
221 return 0; 287 return 0;
222} 288}
223 289
224void arch_uninstall_hw_breakpoint(struct perf_event *bp) 290/*
291 * Install a perf counter breakpoint.
292 */
293int arch_install_hw_breakpoint(struct perf_event *bp)
225{ 294{
226 struct arch_hw_breakpoint *info = counter_arch_bp(bp); 295 return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
227 struct perf_event **slot, **slots; 296}
228 int i, max_slots, base;
229
230 if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
231 /* Breakpoint */
232 base = AARCH64_DBG_REG_BCR;
233 slots = this_cpu_ptr(bp_on_reg);
234 max_slots = core_num_brps;
235 } else {
236 /* Watchpoint */
237 base = AARCH64_DBG_REG_WCR;
238 slots = this_cpu_ptr(wp_on_reg);
239 max_slots = core_num_wrps;
240 }
241
242 /* Remove the breakpoint. */
243 for (i = 0; i < max_slots; ++i) {
244 slot = &slots[i];
245
246 if (*slot == bp) {
247 *slot = NULL;
248 break;
249 }
250 }
251
252 if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
253 return;
254
255 /* Reset the control register. */
256 write_wb_reg(base, i, 0);
257 297
258 /* Release the debug monitors for the correct exception level. */ 298void arch_uninstall_hw_breakpoint(struct perf_event *bp)
259 disable_debug_monitors(debug_exception_level(info->ctrl.privilege)); 299{
300 hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
260} 301}
261 302
262static int get_hbp_len(u8 hbp_len) 303static int get_hbp_len(u8 hbp_len)
@@ -806,18 +847,36 @@ void hw_breakpoint_thread_switch(struct task_struct *next)
806/* 847/*
807 * CPU initialisation. 848 * CPU initialisation.
808 */ 849 */
809static void reset_ctrl_regs(void *unused) 850static void hw_breakpoint_reset(void *unused)
810{ 851{
811 int i; 852 int i;
812 853 struct perf_event **slots;
813 for (i = 0; i < core_num_brps; ++i) { 854 /*
814 write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL); 855 * When a CPU goes through cold-boot, it does not have any installed
815 write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL); 856 * slot, so it is safe to share the same function for restoring and
857 * resetting breakpoints; when a CPU is hotplugged in, it goes
858 * through the slots, which are all empty, hence it just resets control
859 * and value for debug registers.
860 * When this function is triggered on warm-boot through a CPU PM
861 * notifier some slots might be initialized; if so they are
862 * reprogrammed according to the debug slots content.
863 */
864 for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
865 if (slots[i]) {
866 hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
867 } else {
868 write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
869 write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
870 }
816 } 871 }
817 872
818 for (i = 0; i < core_num_wrps; ++i) { 873 for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
819 write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL); 874 if (slots[i]) {
820 write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL); 875 hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
876 } else {
877 write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
878 write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
879 }
821 } 880 }
822} 881}
823 882
@@ -827,7 +886,7 @@ static int hw_breakpoint_reset_notify(struct notifier_block *self,
827{ 886{
828 int cpu = (long)hcpu; 887 int cpu = (long)hcpu;
829 if (action == CPU_ONLINE) 888 if (action == CPU_ONLINE)
830 smp_call_function_single(cpu, reset_ctrl_regs, NULL, 1); 889 smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
831 return NOTIFY_OK; 890 return NOTIFY_OK;
832} 891}
833 892
@@ -835,6 +894,33 @@ static struct notifier_block hw_breakpoint_reset_nb = {
835 .notifier_call = hw_breakpoint_reset_notify, 894 .notifier_call = hw_breakpoint_reset_notify,
836}; 895};
837 896
897#ifdef CONFIG_CPU_PM
898static int hw_breakpoint_cpu_pm_notify(struct notifier_block *self,
899 unsigned long action,
900 void *v)
901{
902 if (action == CPU_PM_EXIT) {
903 hw_breakpoint_reset(NULL);
904 return NOTIFY_OK;
905 }
906
907 return NOTIFY_DONE;
908}
909
910static struct notifier_block hw_breakpoint_cpu_pm_nb = {
911 .notifier_call = hw_breakpoint_cpu_pm_notify,
912};
913
914static void __init hw_breakpoint_pm_init(void)
915{
916 cpu_pm_register_notifier(&hw_breakpoint_cpu_pm_nb);
917}
918#else
919static inline void hw_breakpoint_pm_init(void)
920{
921}
922#endif
923
838/* 924/*
839 * One-time initialisation. 925 * One-time initialisation.
840 */ 926 */
@@ -850,8 +936,8 @@ static int __init arch_hw_breakpoint_init(void)
850 * Reset the breakpoint resources. We assume that a halting 936 * Reset the breakpoint resources. We assume that a halting
851 * debugger will leave the world in a nice state for us. 937 * debugger will leave the world in a nice state for us.
852 */ 938 */
853 smp_call_function(reset_ctrl_regs, NULL, 1); 939 smp_call_function(hw_breakpoint_reset, NULL, 1);
854 reset_ctrl_regs(NULL); 940 hw_breakpoint_reset(NULL);
855 941
856 /* Register debug fault handlers. */ 942 /* Register debug fault handlers. */
857 hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP, 943 hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
@@ -861,6 +947,7 @@ static int __init arch_hw_breakpoint_init(void)
861 947
862 /* Register hotplug notifier. */ 948 /* Register hotplug notifier. */
863 register_cpu_notifier(&hw_breakpoint_reset_nb); 949 register_cpu_notifier(&hw_breakpoint_reset_nb);
950 hw_breakpoint_pm_init();
864 951
865 return 0; 952 return 0;
866} 953}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 0adb8f0f4549..248a15db37f2 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -33,6 +33,7 @@
33#include <linux/kallsyms.h> 33#include <linux/kallsyms.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/cpu.h> 35#include <linux/cpu.h>
36#include <linux/cpuidle.h>
36#include <linux/elfcore.h> 37#include <linux/elfcore.h>
37#include <linux/pm.h> 38#include <linux/pm.h>
38#include <linux/tick.h> 39#include <linux/tick.h>
@@ -98,8 +99,10 @@ void arch_cpu_idle(void)
98 * This should do all the clock switching and wait for interrupt 99 * This should do all the clock switching and wait for interrupt
99 * tricks 100 * tricks
100 */ 101 */
101 cpu_do_idle(); 102 if (cpuidle_idle_call()) {
102 local_irq_enable(); 103 cpu_do_idle();
104 local_irq_enable();
105 }
103} 106}
104 107
105#ifdef CONFIG_HOTPLUG_CPU 108#ifdef CONFIG_HOTPLUG_CPU
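cpuidle_idle_call() returns non-zero when no cpuidle driver or governor is registered, so the plain cpu_do_idle()/WFI path is kept as the fallback. To show where this hooks in, below is a hedged sketch of a trivial cpuidle driver whose single state enters the new cpu_suspend() API; all names and latency figures are made up, and a real driver (registered with cpuidle_register()) needs more care around state 0 and error handling.

```c
/*
 * Hypothetical sketch, not part of this patch: a one-state cpuidle driver
 * that powers the core down through the new cpu_suspend() interface.
 * Names and the latency/residency numbers are invented for illustration.
 */
#include <linux/cpuidle.h>
#include <asm/suspend.h>

static int example_enter_sleep(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	/* cpu_suspend() returns 0 after a successful resume, < 0 on failure */
	cpu_suspend(0);
	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name = "example_idle",
	.states[0] = {
		.enter			= example_enter_sleep,
		.exit_latency		= 300,		/* us, made up */
		.target_residency	= 1000,		/* us, made up */
		.name			= "SLEEP",
		.desc			= "example core power down",
	},
	.state_count = 1,
};
```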
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index bb33fff09ba2..c8e9effe52e1 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -123,6 +123,75 @@ bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
123 return phys_id == cpu_logical_map(cpu); 123 return phys_id == cpu_logical_map(cpu);
124} 124}
125 125
126struct mpidr_hash mpidr_hash;
127#ifdef CONFIG_SMP
128/**
129 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
130 * level in order to build a linear index from an
131 * MPIDR value. Resulting algorithm is a collision
132 * free hash carried out through shifting and ORing
133 */
134static void __init smp_build_mpidr_hash(void)
135{
136 u32 i, affinity, fs[4], bits[4], ls;
137 u64 mask = 0;
138 /*
139 * Pre-scan the list of MPIDRS and filter out bits that do
140 * not contribute to affinity levels, ie they never toggle.
141 */
142 for_each_possible_cpu(i)
143 mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
144 pr_debug("mask of set bits %#llx\n", mask);
145 /*
146 * Find and stash the last and first bit set at all affinity levels to
147 * check how many bits are required to represent them.
148 */
149 for (i = 0; i < 4; i++) {
150 affinity = MPIDR_AFFINITY_LEVEL(mask, i);
151 /*
152 * Find the MSB bit and LSB bits position
153 * to determine how many bits are required
154 * to express the affinity level.
155 */
156 ls = fls(affinity);
157 fs[i] = affinity ? ffs(affinity) - 1 : 0;
158 bits[i] = ls - fs[i];
159 }
160 /*
161 * An index can be created from the MPIDR_EL1 by isolating the
162 * significant bits at each affinity level and by shifting
163 * them in order to compress the 32 bits values space to a
164 * compressed set of values. This is equivalent to hashing
165 * the MPIDR_EL1 through shifting and ORing. It is a collision free
166 * hash though not minimal since some levels might contain a number
167 * of CPUs that is not an exact power of 2 and their bit
168 * representation might contain holes, eg MPIDR_EL1[7:0] = {0x2, 0x80}.
169 */
170 mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
171 mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
172 mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
173 (bits[1] + bits[0]);
174 mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
175 fs[3] - (bits[2] + bits[1] + bits[0]);
176 mpidr_hash.mask = mask;
177 mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
178 pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
179 mpidr_hash.shift_aff[0],
180 mpidr_hash.shift_aff[1],
181 mpidr_hash.shift_aff[2],
182 mpidr_hash.shift_aff[3],
183 mpidr_hash.mask,
184 mpidr_hash.bits);
185 /*
186 * 4x is an arbitrary value used to warn on a hash table much bigger
187 * than expected on most systems.
188 */
189 if (mpidr_hash_size() > 4 * num_possible_cpus())
190 pr_warn("Large number of MPIDR hash buckets detected\n");
191 __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
192}
193#endif
194
126static void __init setup_processor(void) 195static void __init setup_processor(void)
127{ 196{
128 struct cpu_info *cpu_info; 197 struct cpu_info *cpu_info;
@@ -273,6 +342,7 @@ void __init setup_arch(char **cmdline_p)
273 cpu_read_bootcpu_ops(); 342 cpu_read_bootcpu_ops();
274#ifdef CONFIG_SMP 343#ifdef CONFIG_SMP
275 smp_init_cpus(); 344 smp_init_cpus();
345 smp_build_mpidr_hash();
276#endif 346#endif
277 347
278#ifdef CONFIG_VT 348#ifdef CONFIG_VT
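A worked example of what smp_build_mpidr_hash() computes, assuming a hypothetical two-cluster system whose CPUs have MPIDR_EL1 values 0x0, 0x1, 0x100 and 0x101: only Aff0 bit 0 and Aff1 bit 0 ever toggle, so mask = 0x101, bits[0] = bits[1] = 1, shift_aff[0] = 0, shift_aff[1] = 8 + 0 - 1 = 7, and the hash compresses the four MPIDRs into indices 0..3 (hash size 4 = num_possible_cpus()). The same arithmetic in stand-alone C:

```c
/* Stand-alone illustration of the hash for the hypothetical topology above
 * (MPIDRs 0x0, 0x1, 0x100, 0x101): mask = 0x101, shift_aff[0] = 0,
 * shift_aff[1] = 7, giving collision-free indices 0..3. */
#include <stdio.h>
#include <stdint.h>

static unsigned int mpidr_hash_index(uint64_t mpidr)
{
	const uint64_t mask = 0x101;		/* bits that actually toggle across CPUs */
	const unsigned int shift_aff0 = 0;	/* MPIDR_LEVEL_SHIFT(0) + fs[0] */
	const unsigned int shift_aff1 = 7;	/* MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0] */
	uint64_t masked = mpidr & mask;

	return ((masked & 0xff) >> shift_aff0) |
	       ((masked & 0xff00) >> shift_aff1);
}

int main(void)
{
	const uint64_t cpus[] = { 0x000, 0x001, 0x100, 0x101 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("MPIDR 0x%03llx -> index %u\n",
		       (unsigned long long)cpus[i], mpidr_hash_index(cpus[i]));
	return 0;
}
```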
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
new file mode 100644
index 000000000000..b1925729c692
--- /dev/null
+++ b/arch/arm64/kernel/sleep.S
@@ -0,0 +1,184 @@
1#include <linux/errno.h>
2#include <linux/linkage.h>
3#include <asm/asm-offsets.h>
4#include <asm/assembler.h>
5
6 .text
7/*
8 * Implementation of MPIDR_EL1 hash algorithm through shifting
9 * and OR'ing.
10 *
11 * @dst: register containing hash result
12 * @rs0: register containing affinity level 0 bit shift
13 * @rs1: register containing affinity level 1 bit shift
14 * @rs2: register containing affinity level 2 bit shift
15 * @rs3: register containing affinity level 3 bit shift
16 * @mpidr: register containing MPIDR_EL1 value
17 * @mask: register containing MPIDR mask
18 *
19 * Pseudo C-code:
20 *
21 *u32 dst;
22 *
23 *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) {
24 * u32 aff0, aff1, aff2, aff3;
25 * u64 mpidr_masked = mpidr & mask;
26 * aff0 = mpidr_masked & 0xff;
27 * aff1 = mpidr_masked & 0xff00;
28 * aff2 = mpidr_masked & 0xff0000;
 29 * aff3 = mpidr_masked & 0xff00000000;
30 * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
31 *}
32 * Input registers: rs0, rs1, rs2, rs3, mpidr, mask
33 * Output register: dst
34 * Note: input and output registers must be disjoint register sets
35 (eg: a macro instance with mpidr = x1 and dst = x1 is invalid)
36 */
37 .macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask
38 and \mpidr, \mpidr, \mask // mask out MPIDR bits
39 and \dst, \mpidr, #0xff // mask=aff0
40 lsr \dst ,\dst, \rs0 // dst=aff0>>rs0
41 and \mask, \mpidr, #0xff00 // mask = aff1
42 lsr \mask ,\mask, \rs1
43 orr \dst, \dst, \mask // dst|=(aff1>>rs1)
44 and \mask, \mpidr, #0xff0000 // mask = aff2
45 lsr \mask ,\mask, \rs2
46 orr \dst, \dst, \mask // dst|=(aff2>>rs2)
47 and \mask, \mpidr, #0xff00000000 // mask = aff3
48 lsr \mask ,\mask, \rs3
49 orr \dst, \dst, \mask // dst|=(aff3>>rs3)
50 .endm
51/*
52 * Save CPU state for a suspend. This saves callee registers, and allocates
53 * space on the kernel stack to save the CPU specific registers + some
54 * other data for resume.
55 *
56 * x0 = suspend finisher argument
57 */
58ENTRY(__cpu_suspend)
59 stp x29, lr, [sp, #-96]!
60 stp x19, x20, [sp,#16]
61 stp x21, x22, [sp,#32]
62 stp x23, x24, [sp,#48]
63 stp x25, x26, [sp,#64]
64 stp x27, x28, [sp,#80]
65 mov x2, sp
66 sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
67 mov x1, sp
68 /*
69 * x1 now points to struct cpu_suspend_ctx allocated on the stack
70 */
71 str x2, [x1, #CPU_CTX_SP]
72 ldr x2, =sleep_save_sp
73 ldr x2, [x2, #SLEEP_SAVE_SP_VIRT]
74#ifdef CONFIG_SMP
75 mrs x7, mpidr_el1
76 ldr x9, =mpidr_hash
77 ldr x10, [x9, #MPIDR_HASH_MASK]
78 /*
79 * Following code relies on the struct mpidr_hash
80 * members size.
81 */
82 ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS]
83 ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
84 compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
85 add x2, x2, x8, lsl #3
86#endif
87 bl __cpu_suspend_finisher
88 /*
89 * Never gets here, unless suspend fails.
90 * Successful cpu_suspend should return from cpu_resume, returning
91 * through this code path is considered an error
92 * If the return value is set to 0 force x0 = -EOPNOTSUPP
93 * to make sure a proper error condition is propagated
94 */
95 cmp x0, #0
96 mov x3, #-EOPNOTSUPP
97 csel x0, x3, x0, eq
98 add sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer
99 ldp x19, x20, [sp, #16]
100 ldp x21, x22, [sp, #32]
101 ldp x23, x24, [sp, #48]
102 ldp x25, x26, [sp, #64]
103 ldp x27, x28, [sp, #80]
104 ldp x29, lr, [sp], #96
105 ret
106ENDPROC(__cpu_suspend)
107 .ltorg
108
109/*
110 * x0 must contain the sctlr value retrieved from restored context
111 */
112ENTRY(cpu_resume_mmu)
113 ldr x3, =cpu_resume_after_mmu
114 msr sctlr_el1, x0 // restore sctlr_el1
115 isb
116 br x3 // global jump to virtual address
117ENDPROC(cpu_resume_mmu)
118cpu_resume_after_mmu:
119 mov x0, #0 // return zero on success
120 ldp x19, x20, [sp, #16]
121 ldp x21, x22, [sp, #32]
122 ldp x23, x24, [sp, #48]
123 ldp x25, x26, [sp, #64]
124 ldp x27, x28, [sp, #80]
125 ldp x29, lr, [sp], #96
126 ret
127ENDPROC(cpu_resume_after_mmu)
128
129 .data
130ENTRY(cpu_resume)
131 bl el2_setup // if in EL2 drop to EL1 cleanly
132#ifdef CONFIG_SMP
133 mrs x1, mpidr_el1
134 adr x4, mpidr_hash_ptr
135 ldr x5, [x4]
136 add x8, x4, x5 // x8 = struct mpidr_hash phys address
137 /* retrieve mpidr_hash members to compute the hash */
138 ldr x2, [x8, #MPIDR_HASH_MASK]
139 ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS]
140 ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
141 compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
142 /* x7 contains hash index, let's use it to grab context pointer */
143#else
144 mov x7, xzr
145#endif
146 adr x0, sleep_save_sp
147 ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
148 ldr x0, [x0, x7, lsl #3]
149 /* load sp from context */
150 ldr x2, [x0, #CPU_CTX_SP]
151 adr x1, sleep_idmap_phys
152 /* load physical address of identity map page table in x1 */
153 ldr x1, [x1]
154 mov sp, x2
155 /*
156 * cpu_do_resume expects x0 to contain context physical address
157 * pointer and x1 to contain physical address of 1:1 page tables
158 */
159 bl cpu_do_resume // PC relative jump, MMU off
160 b cpu_resume_mmu // Resume MMU, never returns
161ENDPROC(cpu_resume)
162
163 .align 3
164mpidr_hash_ptr:
165 /*
166 * offset of mpidr_hash symbol from current location
167 * used to obtain run-time mpidr_hash address with MMU off
168 */
169 .quad mpidr_hash - .
170/*
171 * physical address of identity mapped page tables
172 */
173 .type sleep_idmap_phys, #object
174ENTRY(sleep_idmap_phys)
175 .quad 0
176/*
177 * struct sleep_save_sp {
178 * phys_addr_t *save_ptr_stash;
179 * phys_addr_t save_ptr_stash_phys;
180 * };
181 */
182 .type sleep_save_sp, #object
183ENTRY(sleep_save_sp)
184 .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp
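For readers less comfortable with the assembler, here is a C rendering of the compute_mpidr_hash macro used by __cpu_suspend and cpu_resume to index the per-CPU sleep_save_sp slot (each slot is 8 bytes, hence the lsl #3 scaling in the assembly). This is an illustration of the same shift/OR sequence, not code from the patch.

```c
/*
 * Illustrative C equivalent of the compute_mpidr_hash assembler macro above.
 * The result is the index into the sleep_save_sp save_ptr_stash array.
 */
#include <stdint.h>

static uint32_t compute_mpidr_hash(uint32_t rs0, uint32_t rs1, uint32_t rs2,
				   uint32_t rs3, uint64_t mpidr, uint64_t mask)
{
	uint64_t m = mpidr & mask;

	return ((m & 0xff) >> rs0) |		/* Aff0 */
	       ((m & 0xff00) >> rs1) |		/* Aff1 */
	       ((m & 0xff0000) >> rs2) |	/* Aff2 */
	       ((m & 0xff00000000ULL) >> rs3);	/* Aff3 */
}
```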
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b5d2031c12c6..1b7617ab499b 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -61,6 +61,7 @@ enum ipi_msg_type {
61 IPI_CALL_FUNC, 61 IPI_CALL_FUNC,
62 IPI_CALL_FUNC_SINGLE, 62 IPI_CALL_FUNC_SINGLE,
63 IPI_CPU_STOP, 63 IPI_CPU_STOP,
64 IPI_TIMER,
64}; 65};
65 66
66/* 67/*
@@ -449,6 +450,7 @@ static const char *ipi_types[NR_IPI] = {
449 S(IPI_CALL_FUNC, "Function call interrupts"), 450 S(IPI_CALL_FUNC, "Function call interrupts"),
450 S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), 451 S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
451 S(IPI_CPU_STOP, "CPU stop interrupts"), 452 S(IPI_CPU_STOP, "CPU stop interrupts"),
453 S(IPI_TIMER, "Timer broadcast interrupts"),
452}; 454};
453 455
454void show_ipi_list(struct seq_file *p, int prec) 456void show_ipi_list(struct seq_file *p, int prec)
@@ -534,6 +536,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
534 irq_exit(); 536 irq_exit();
535 break; 537 break;
536 538
539#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
540 case IPI_TIMER:
541 irq_enter();
542 tick_receive_broadcast();
543 irq_exit();
544 break;
545#endif
546
537 default: 547 default:
538 pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); 548 pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
539 break; 549 break;
@@ -546,6 +556,13 @@ void smp_send_reschedule(int cpu)
546 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); 556 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
547} 557}
548 558
559#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
560void tick_broadcast(const struct cpumask *mask)
561{
562 smp_cross_call(mask, IPI_TIMER);
563}
564#endif
565
549void smp_send_stop(void) 566void smp_send_stop(void)
550{ 567{
551 unsigned long timeout; 568 unsigned long timeout;
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
new file mode 100644
index 000000000000..e074b1c32723
--- /dev/null
+++ b/arch/arm64/kernel/suspend.c
@@ -0,0 +1,109 @@
1#include <linux/slab.h>
2#include <asm/cacheflush.h>
3#include <asm/cpu_ops.h>
4#include <asm/debug-monitors.h>
5#include <asm/pgtable.h>
6#include <asm/memory.h>
7#include <asm/smp_plat.h>
8#include <asm/suspend.h>
9#include <asm/tlbflush.h>
10
11extern int __cpu_suspend(unsigned long);
12/*
13 * This is called by __cpu_suspend() to save the state, and do whatever
14 * flushing is required to ensure that when the CPU goes to sleep we have
15 * the necessary data available when the caches are not searched.
16 *
17 * @arg: Argument to pass to suspend operations
18 * @ptr: CPU context virtual address
19 * @save_ptr: address of the location where the context physical address
20 * must be saved
21 */
22int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
23 phys_addr_t *save_ptr)
24{
25 int cpu = smp_processor_id();
26
27 *save_ptr = virt_to_phys(ptr);
28
29 cpu_do_suspend(ptr);
30 /*
31 * Only flush the context that must be retrieved with the MMU
32 * off. VA primitives ensure the flush is applied to all
33 * cache levels so context is pushed to DRAM.
34 */
35 __flush_dcache_area(ptr, sizeof(*ptr));
36 __flush_dcache_area(save_ptr, sizeof(*save_ptr));
37
38 return cpu_ops[cpu]->cpu_suspend(arg);
39}
40
41/**
42 * cpu_suspend
43 *
44 * @arg: argument to pass to the finisher function
45 */
46int cpu_suspend(unsigned long arg)
47{
48 struct mm_struct *mm = current->active_mm;
49 int ret, cpu = smp_processor_id();
50 unsigned long flags;
51
52 /*
53 * If cpu_ops have not been registered or suspend
54 * has not been initialized, cpu_suspend call fails early.
55 */
56 if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
57 return -EOPNOTSUPP;
58
59 /*
60 * From this point debug exceptions are disabled to prevent
61 * updates to mdscr register (saved and restored along with
62 * general purpose registers) from kernel debuggers.
63 */
64 local_dbg_save(flags);
65
66 /*
67 * mm context saved on the stack, it will be restored when
68 * the cpu comes out of reset through the identity mapped
69 * page tables, so that the thread address space is properly
70 * set-up on function return.
71 */
72 ret = __cpu_suspend(arg);
73 if (ret == 0) {
74 cpu_switch_mm(mm->pgd, mm);
75 flush_tlb_all();
76 }
77
78 /*
79 * Restore pstate flags. OS lock and mdscr have been already
80 * restored, so from this point onwards, debugging is fully
 81 * re-enabled if it was enabled when core started shutdown.
82 */
83 local_dbg_restore(flags);
84
85 return ret;
86}
87
88extern struct sleep_save_sp sleep_save_sp;
89extern phys_addr_t sleep_idmap_phys;
90
91static int cpu_suspend_init(void)
92{
93 void *ctx_ptr;
94
95 /* ctx_ptr is an array of physical addresses */
96 ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);
97
98 if (WARN_ON(!ctx_ptr))
99 return -ENOMEM;
100
101 sleep_save_sp.save_ptr_stash = ctx_ptr;
102 sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
103 sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
104 __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
105 __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));
106
107 return 0;
108}
109early_initcall(cpu_suspend_init);
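cpu_suspend() hands the actual power-down to the cpu_operations ->cpu_suspend method registered for the CPU at boot (from the device-tree enable-method). The patch itself adds no backend; the sketch below shows roughly what one would look like, with made-up names — in practice this role is filled by firmware interfaces such as PSCI, and a real cpu_operations instance also provides the boot/init methods omitted here.

```c
/*
 * Hypothetical backend sketch (not part of this patch). example_*() names
 * are invented; the mandatory cpu_init/cpu_prepare/cpu_boot methods of a
 * real cpu_operations instance are omitted for brevity.
 */
#include <asm/cpu_ops.h>

extern int example_firmware_suspend(unsigned long arg);	/* hypothetical */

static int example_cpu_suspend(unsigned long arg)
{
	/*
	 * Called from __cpu_suspend_finisher() with IRQs disabled. If the
	 * firmware call returns, the power-down failed and the error is
	 * propagated back through __cpu_suspend() to cpu_suspend().
	 */
	return example_firmware_suspend(arg);
}

const struct cpu_operations example_cpu_ops = {
	.name		= "example",
#ifdef CONFIG_ARM64_CPU_SUSPEND
	.cpu_suspend	= example_cpu_suspend,
#endif
};
```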
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 0f7fec52c7f8..bed1f1de1caf 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -80,6 +80,75 @@ ENTRY(cpu_do_idle)
80 ret 80 ret
81ENDPROC(cpu_do_idle) 81ENDPROC(cpu_do_idle)
82 82
83#ifdef CONFIG_ARM64_CPU_SUSPEND
84/**
85 * cpu_do_suspend - save CPU registers context
86 *
87 * x0: virtual address of context pointer
88 */
89ENTRY(cpu_do_suspend)
90 mrs x2, tpidr_el0
91 mrs x3, tpidrro_el0
92 mrs x4, contextidr_el1
93 mrs x5, mair_el1
94 mrs x6, cpacr_el1
95 mrs x7, ttbr1_el1
96 mrs x8, tcr_el1
97 mrs x9, vbar_el1
98 mrs x10, mdscr_el1
99 mrs x11, oslsr_el1
100 mrs x12, sctlr_el1
101 stp x2, x3, [x0]
102 stp x4, x5, [x0, #16]
103 stp x6, x7, [x0, #32]
104 stp x8, x9, [x0, #48]
105 stp x10, x11, [x0, #64]
106 str x12, [x0, #80]
107 ret
108ENDPROC(cpu_do_suspend)
109
110/**
111 * cpu_do_resume - restore CPU register context
112 *
113 * x0: Physical address of context pointer
114 * x1: ttbr0_el1 to be restored
115 *
116 * Returns:
117 * sctlr_el1 value in x0
118 */
119ENTRY(cpu_do_resume)
120 /*
121 * Invalidate local tlb entries before turning on MMU
122 */
123 tlbi vmalle1
124 ldp x2, x3, [x0]
125 ldp x4, x5, [x0, #16]
126 ldp x6, x7, [x0, #32]
127 ldp x8, x9, [x0, #48]
128 ldp x10, x11, [x0, #64]
129 ldr x12, [x0, #80]
130 msr tpidr_el0, x2
131 msr tpidrro_el0, x3
132 msr contextidr_el1, x4
133 msr mair_el1, x5
134 msr cpacr_el1, x6
135 msr ttbr0_el1, x1
136 msr ttbr1_el1, x7
137 msr tcr_el1, x8
138 msr vbar_el1, x9
139 msr mdscr_el1, x10
140 /*
141 * Restore oslsr_el1 by writing oslar_el1
142 */
143 ubfx x11, x11, #1, #1
144 msr oslar_el1, x11
145 mov x0, x12
146 dsb nsh // Make sure local tlb invalidation completed
147 isb
148 ret
149ENDPROC(cpu_do_resume)
150#endif
151
83/* 152/*
84 * cpu_switch_mm(pgd_phys, tsk) 153 * cpu_switch_mm(pgd_phys, tsk)
85 * 154 *