author     Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>   2013-07-22 07:22:13 -0400
committer  Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>   2013-12-16 12:17:31 -0500
commit     95322526ef62b84adb469c27535ab0252a369a85 (patch)
tree       81fe36a9dd3361ebbcfbebee7a18207c0f58b7f9 /arch
parent     6732bc65c277b697f6d8b645b15f63d1558c0cc4 (diff)
arm64: kernel: cpu_{suspend/resume} implementation
Kernel subsystems like CPU idle and suspend to RAM require a generic mechanism to suspend a processor, save its context and put it into a quiescent state. The cpu_{suspend}/{resume} implementation provides such a framework through a kernel interface that allows registers to be saved/restored, the context to be flushed to DRAM, and the processor to be suspended to and resumed from low-power states where its context may be lost.

The CPU suspend implementation relies on the suspend protocol registered in CPU operations to carry out a suspend request after the context is saved and flushed to DRAM.

The cpu_suspend interface:

int cpu_suspend(unsigned long arg);

takes an opaque parameter that is handed over to the suspend CPU operations back-end so that it can act according to the semantics attached to it. The arg parameter lets suspend to RAM and CPU idle drivers communicate with suspend protocol back-ends; it requires standardization so that the interface can be reused seamlessly across systems, paving the way for generic drivers.

Context memory is allocated on the stack, whose address is stashed in a per-cpu variable to keep track of it and passed to the core functions that save/restore the registers required by the architecture.

Even though, upon successful execution, the cpu_suspend function shuts down the suspending processor, the warm boot resume mechanism, based on the cpu_resume function, makes the resume path operate as a cpu_suspend function return, so that cpu_suspend can be treated as a C function by the caller. This simplifies coding the PM drivers that rely on the cpu_suspend API.

Upon context save, the minimal amount of memory is flushed to DRAM so that it can be retrieved when the MMU is off and caches are not searched. The suspend CPU operation, depending on the required operations (eg CPU vs Cluster shutdown), is in charge of flushing the cache hierarchy either implicitly (by calling firmware implementations like PSCI) or explicitly by executing the required cache maintenance functions.

Debug exceptions are disabled during cpu_{suspend}/{resume} operations so that debug registers can be saved and restored properly, preventing preemption by debug agents enabled in the kernel.

Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
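As an illustration of the calling convention described above, here is a minimal, hypothetical sketch of how an idle driver might invoke the interface (the driver function and its state encoding are invented for illustration; only cpu_suspend() itself is part of this patch):

	#include <linux/printk.h>
	#include <linux/smp.h>
	#include <asm/suspend.h>

	/* Hypothetical deep-idle entry point; "state" is an opaque value
	 * whose meaning is defined by the platform's suspend back-end. */
	static int example_enter_deep_idle(unsigned long state)
	{
		/*
		 * cpu_suspend() behaves like an ordinary C function: on
		 * success the CPU is shut down and later warm-boots through
		 * cpu_resume(), which makes execution continue here as if
		 * cpu_suspend() had simply returned 0.
		 */
		int ret = cpu_suspend(state);

		if (ret)
			pr_err("CPU%d suspend failed: %d\n",
			       smp_processor_id(), ret);
		return ret;
	}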
Diffstat (limited to 'arch')
 -rw-r--r--  arch/arm64/include/asm/cpu_ops.h  |   6
 -rw-r--r--  arch/arm64/include/asm/suspend.h  |   9
 -rw-r--r--  arch/arm64/kernel/asm-offsets.c   |  11
 -rw-r--r--  arch/arm64/kernel/sleep.S         | 184
 -rw-r--r--  arch/arm64/kernel/suspend.c       | 109
 5 files changed, 319 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index c4cdb5e5b73d..152413076503 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -39,6 +39,9 @@ struct device_node;
  *		from the cpu to be killed.
  * @cpu_die:	Makes a cpu leave the kernel. Must not fail. Called from the
  *		cpu being killed.
+ * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
+ *		to wrong parameters or error conditions. Called from the
+ *		CPU being suspended. Must be called with IRQs disabled.
  */
 struct cpu_operations {
 	const char	*name;
@@ -50,6 +53,9 @@ struct cpu_operations {
 	int	(*cpu_disable)(unsigned int cpu);
 	void	(*cpu_die)(unsigned int cpu);
 #endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+	int	(*cpu_suspend)(unsigned long);
+#endif
 };
 
 extern const struct cpu_operations *cpu_ops[NR_CPUS];
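For context, a suspend protocol back-end would wire the new hook roughly as follows; the "foo" names and the firmware call are hypothetical and not part of this patch:

	#ifdef CONFIG_ARM64_CPU_SUSPEND
	/* Hypothetical back-end: enters the low-power state described by
	 * arg. On success it never returns here; the CPU warm-boots back
	 * through cpu_resume instead. */
	static int foo_cpu_suspend(unsigned long arg)
	{
		return foo_firmware_cpu_suspend(arg); /* hypothetical call */
	}
	#endif

	static const struct cpu_operations foo_cpu_ops = {
		.name		= "foo",
		/* ... boot/hotplug methods ... */
	#ifdef CONFIG_ARM64_CPU_SUSPEND
		.cpu_suspend	= foo_cpu_suspend,
	#endif
	};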
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index a88558e223da..e9c149c042e0 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -15,4 +15,13 @@ struct cpu_suspend_ctx {
 	u64 ctx_regs[NR_CTX_REGS];
 	u64 sp;
 } __aligned(16);
+
+struct sleep_save_sp {
+	phys_addr_t *save_ptr_stash;
+	phys_addr_t save_ptr_stash_phys;
+};
+
+extern void cpu_resume(void);
+extern int cpu_suspend(unsigned long);
+
 #endif
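The struct just added pairs two views of the same stash: the suspend path (MMU on) writes each CPU's context physical address through save_ptr_stash, while the resume path (MMU off) reads the same slot back via save_ptr_stash_phys. A conceptual sketch of the suspend-side write, with the index computed from the MPIDR_EL1 hash as in sleep.S (function name is illustrative):

	#include <asm/suspend.h>

	/* Sketch: one stash slot per CPU, indexed by the MPIDR_EL1 hash. */
	static void stash_context_phys(struct sleep_save_sp *s,
				       unsigned int hash_idx,
				       phys_addr_t ctx_phys)
	{
		/* suspend path, MMU on: record where this CPU's context
		 * lives in DRAM; the resume path dereferences
		 * save_ptr_stash_phys + hash_idx * 8 with the MMU off to
		 * find the same slot */
		s->save_ptr_stash[hash_idx] = ctx_phys;
	}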
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 666e231d410b..646f888387cd 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -25,6 +25,8 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/cputable.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
 #include <asm/vdso_datapage.h>
 #include <linux/kbuild.h>
 
@@ -138,5 +140,14 @@ int main(void)
   DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
   DEFINE(KVM_VGIC_VCTRL,	offsetof(struct kvm, arch.vgic.vctrl_base));
 #endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+  DEFINE(CPU_SUSPEND_SZ,	sizeof(struct cpu_suspend_ctx));
+  DEFINE(CPU_CTX_SP,		offsetof(struct cpu_suspend_ctx, sp));
+  DEFINE(MPIDR_HASH_MASK,	offsetof(struct mpidr_hash, mask));
+  DEFINE(MPIDR_HASH_SHIFTS,	offsetof(struct mpidr_hash, shift_aff));
+  DEFINE(SLEEP_SAVE_SP_SZ,	sizeof(struct sleep_save_sp));
+  DEFINE(SLEEP_SAVE_SP_PHYS,	offsetof(struct sleep_save_sp, save_ptr_stash_phys));
+  DEFINE(SLEEP_SAVE_SP_VIRT,	offsetof(struct sleep_save_sp, save_ptr_stash));
+#endif
   return 0;
 }
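For reference, these DEFINE() entries reach assembly through the usual asm-offsets machinery: <linux/kbuild.h> emits each value as a marker in the compiler's assembly output, which the build then scripts into the generated asm-offsets.h. Roughly:

	/* From <linux/kbuild.h>: emit the value as an immediate in the .s
	 * output; a build script turns the "->" markers into #defines. */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	/*
	 * DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp)) thus
	 * becomes "#define CPU_CTX_SP <offset>" in asm-offsets.h, which is
	 * what lets sleep.S write "str x2, [x1, #CPU_CTX_SP]".
	 */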
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
new file mode 100644
index 000000000000..b1925729c692
--- /dev/null
+++ b/arch/arm64/kernel/sleep.S
@@ -0,0 +1,184 @@
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+
+	.text
+/*
+ * Implementation of MPIDR_EL1 hash algorithm through shifting
+ * and OR'ing.
+ *
+ * @dst: register containing hash result
+ * @rs0: register containing affinity level 0 bit shift
+ * @rs1: register containing affinity level 1 bit shift
+ * @rs2: register containing affinity level 2 bit shift
+ * @rs3: register containing affinity level 3 bit shift
+ * @mpidr: register containing MPIDR_EL1 value
+ * @mask: register containing MPIDR mask
+ *
+ * Pseudo C-code:
+ *
+ *u32 dst;
+ *
+ *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) {
+ *	u32 aff0, aff1, aff2, aff3;
+ *	u64 mpidr_masked = mpidr & mask;
+ *	aff0 = mpidr_masked & 0xff;
+ *	aff1 = mpidr_masked & 0xff00;
+ *	aff2 = mpidr_masked & 0xff0000;
+ *	aff3 = mpidr_masked & 0xff00000000;
+ *	dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
+ *}
+ * Input registers: rs0, rs1, rs2, rs3, mpidr, mask
+ * Output register: dst
+ * Note: input and output registers must be disjoint register sets
+ *       (eg: a macro instance with mpidr = x1 and dst = x1 is invalid)
+ */
+	.macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask
+	and	\mpidr, \mpidr, \mask		// mask out MPIDR bits
+	and	\dst, \mpidr, #0xff		// dst = aff0
+	lsr	\dst, \dst, \rs0		// dst = aff0 >> rs0
+	and	\mask, \mpidr, #0xff00		// mask = aff1
+	lsr	\mask, \mask, \rs1
+	orr	\dst, \dst, \mask		// dst |= (aff1 >> rs1)
+	and	\mask, \mpidr, #0xff0000	// mask = aff2
+	lsr	\mask, \mask, \rs2
+	orr	\dst, \dst, \mask		// dst |= (aff2 >> rs2)
+	and	\mask, \mpidr, #0xff00000000	// mask = aff3
+	lsr	\mask, \mask, \rs3
+	orr	\dst, \dst, \mask		// dst |= (aff3 >> rs3)
+	.endm
+/*
+ * Save CPU state for a suspend. This saves callee registers, and allocates
+ * space on the kernel stack to save the CPU specific registers + some
+ * other data for resume.
+ *
+ * x0 = suspend finisher argument
+ */
+ENTRY(__cpu_suspend)
+	stp	x29, lr, [sp, #-96]!
+	stp	x19, x20, [sp, #16]
+	stp	x21, x22, [sp, #32]
+	stp	x23, x24, [sp, #48]
+	stp	x25, x26, [sp, #64]
+	stp	x27, x28, [sp, #80]
+	mov	x2, sp
+	sub	sp, sp, #CPU_SUSPEND_SZ		// allocate cpu_suspend_ctx
+	mov	x1, sp
+	/*
+	 * x1 now points to struct cpu_suspend_ctx allocated on the stack
+	 */
+	str	x2, [x1, #CPU_CTX_SP]
+	ldr	x2, =sleep_save_sp
+	ldr	x2, [x2, #SLEEP_SAVE_SP_VIRT]
+#ifdef CONFIG_SMP
+	mrs	x7, mpidr_el1
+	ldr	x9, =mpidr_hash
+	ldr	x10, [x9, #MPIDR_HASH_MASK]
+	/*
+	 * The following code relies on the size of the
+	 * struct mpidr_hash members.
+	 */
+	ldp	w3, w4, [x9, #MPIDR_HASH_SHIFTS]
+	ldp	w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
+	compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
+	add	x2, x2, x8, lsl #3
+#endif
+	bl	__cpu_suspend_finisher
+	/*
+	 * Execution never gets here unless suspend fails.
+	 * A successful cpu_suspend returns through cpu_resume, so returning
+	 * through this code path is considered an error.
+	 * If the return value is set to 0, force x0 = -EOPNOTSUPP
+	 * to make sure a proper error condition is propagated.
+	 */
+	cmp	x0, #0
+	mov	x3, #-EOPNOTSUPP
+	csel	x0, x3, x0, eq
+	add	sp, sp, #CPU_SUSPEND_SZ		// rewind stack pointer
+	ldp	x19, x20, [sp, #16]
+	ldp	x21, x22, [sp, #32]
+	ldp	x23, x24, [sp, #48]
+	ldp	x25, x26, [sp, #64]
+	ldp	x27, x28, [sp, #80]
+	ldp	x29, lr, [sp], #96
+	ret
+ENDPROC(__cpu_suspend)
+	.ltorg
+
+/*
+ * x0 must contain the sctlr value retrieved from restored context
+ */
+ENTRY(cpu_resume_mmu)
+	ldr	x3, =cpu_resume_after_mmu
+	msr	sctlr_el1, x0		// restore sctlr_el1
+	isb
+	br	x3			// global jump to virtual address
+ENDPROC(cpu_resume_mmu)
+cpu_resume_after_mmu:
+	mov	x0, #0			// return zero on success
+	ldp	x19, x20, [sp, #16]
+	ldp	x21, x22, [sp, #32]
+	ldp	x23, x24, [sp, #48]
+	ldp	x25, x26, [sp, #64]
+	ldp	x27, x28, [sp, #80]
+	ldp	x29, lr, [sp], #96
+	ret
+ENDPROC(cpu_resume_after_mmu)
+
+	.data
+ENTRY(cpu_resume)
+	bl	el2_setup		// if in EL2 drop to EL1 cleanly
+#ifdef CONFIG_SMP
+	mrs	x1, mpidr_el1
+	adr	x4, mpidr_hash_ptr
+	ldr	x5, [x4]
+	add	x8, x4, x5		// x8 = struct mpidr_hash phys address
+	/* retrieve mpidr_hash members to compute the hash */
+	ldr	x2, [x8, #MPIDR_HASH_MASK]
+	ldp	w3, w4, [x8, #MPIDR_HASH_SHIFTS]
+	ldp	w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
+	compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
+	/* x7 contains hash index, let's use it to grab context pointer */
+#else
+	mov	x7, xzr
+#endif
+	adr	x0, sleep_save_sp
+	ldr	x0, [x0, #SLEEP_SAVE_SP_PHYS]
+	ldr	x0, [x0, x7, lsl #3]
+	/* load sp from context */
+	ldr	x2, [x0, #CPU_CTX_SP]
+	adr	x1, sleep_idmap_phys
+	/* load physical address of identity map page table in x1 */
+	ldr	x1, [x1]
+	mov	sp, x2
+	/*
+	 * cpu_do_resume expects x0 to contain context physical address
+	 * pointer and x1 to contain physical address of 1:1 page tables
+	 */
+	bl	cpu_do_resume		// PC relative jump, MMU off
+	b	cpu_resume_mmu		// Resume MMU, never returns
+ENDPROC(cpu_resume)
+
+	.align 3
+mpidr_hash_ptr:
+	/*
+	 * offset of mpidr_hash symbol from current location
+	 * used to obtain run-time mpidr_hash address with MMU off
+	 */
+	.quad	mpidr_hash - .
+/*
+ * physical address of identity mapped page tables
+ */
+	.type	sleep_idmap_phys, #object
+ENTRY(sleep_idmap_phys)
+	.quad	0
+/*
+ * struct sleep_save_sp {
+ *	phys_addr_t *save_ptr_stash;
+ *	phys_addr_t save_ptr_stash_phys;
+ * };
+ */
+	.type	sleep_save_sp, #object
+ENTRY(sleep_save_sp)
+	.space	SLEEP_SAVE_SP_SZ	// struct sleep_save_sp
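To make the hash easier to check, here is the macro's pseudo C-code written out as a compilable sketch (with the aff3 assignment spelled out; function and variable names are illustrative):

	#include <stdint.h>

	/* C mirror of the compute_mpidr_hash macro in sleep.S above. */
	static uint32_t compute_mpidr_hash(uint32_t rs0, uint32_t rs1,
					   uint32_t rs2, uint32_t rs3,
					   uint64_t mpidr, uint64_t mask)
	{
		uint64_t m = mpidr & mask;		/* mask out MPIDR bits */
		uint64_t aff0 = m & 0xff;		/* affinity level 0 */
		uint64_t aff1 = m & 0xff00;		/* affinity level 1 */
		uint64_t aff2 = m & 0xff0000;		/* affinity level 2 */
		uint64_t aff3 = m & 0xff00000000ULL;	/* affinity level 3 */

		return (uint32_t)(aff0 >> rs0 | aff1 >> rs1 |
				  aff2 >> rs2 | aff3 >> rs3);
	}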
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
new file mode 100644
index 000000000000..e074b1c32723
--- /dev/null
+++ b/arch/arm64/kernel/suspend.c
@@ -0,0 +1,109 @@
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/debug-monitors.h>
+#include <asm/pgtable.h>
+#include <asm/memory.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+
+extern int __cpu_suspend(unsigned long);
+/*
+ * This is called by __cpu_suspend() to save the state, and do whatever
+ * flushing is required to ensure that when the CPU goes to sleep we have
+ * the necessary data available when the caches are not searched.
+ *
+ * @arg: Argument to pass to suspend operations
+ * @ptr: CPU context virtual address
+ * @save_ptr: address of the location where the context physical address
+ *            must be saved
+ */
+int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
+			   phys_addr_t *save_ptr)
+{
+	int cpu = smp_processor_id();
+
+	*save_ptr = virt_to_phys(ptr);
+
+	cpu_do_suspend(ptr);
+	/*
+	 * Only flush the context that must be retrieved with the MMU
+	 * off. VA primitives ensure the flush is applied to all
+	 * cache levels so the context is pushed to DRAM.
+	 */
+	__flush_dcache_area(ptr, sizeof(*ptr));
+	__flush_dcache_area(save_ptr, sizeof(*save_ptr));
+
+	return cpu_ops[cpu]->cpu_suspend(arg);
+}
+
+/**
+ * cpu_suspend
+ *
+ * @arg: argument to pass to the finisher function
+ */
+int cpu_suspend(unsigned long arg)
+{
+	struct mm_struct *mm = current->active_mm;
+	int ret, cpu = smp_processor_id();
+	unsigned long flags;
+
+	/*
+	 * If cpu_ops have not been registered or suspend
+	 * has not been initialized, this call fails early.
+	 */
+	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
+		return -EOPNOTSUPP;
+
+	/*
+	 * From this point debug exceptions are disabled to prevent
+	 * updates to the mdscr register (saved and restored along with
+	 * general purpose registers) from kernel debuggers.
+	 */
+	local_dbg_save(flags);
+
+	/*
+	 * The mm context is saved on the stack; it will be restored when
+	 * the cpu comes out of reset through the identity mapped
+	 * page tables, so that the thread address space is properly
+	 * set up on function return.
+	 */
+	ret = __cpu_suspend(arg);
+	if (ret == 0) {
+		cpu_switch_mm(mm->pgd, mm);
+		flush_tlb_all();
+	}
+
+	/*
+	 * Restore pstate flags. The OS lock and mdscr have already been
+	 * restored, so from this point onwards, debugging is fully
+	 * re-enabled if it was enabled when the core started shutdown.
+	 */
+	local_dbg_restore(flags);
+
+	return ret;
+}
+
+extern struct sleep_save_sp sleep_save_sp;
+extern phys_addr_t sleep_idmap_phys;
+
+static int cpu_suspend_init(void)
+{
+	void *ctx_ptr;
+
+	/* ctx_ptr is an array of physical addresses */
+	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);
+
+	if (WARN_ON(!ctx_ptr))
+		return -ENOMEM;
+
+	sleep_save_sp.save_ptr_stash = ctx_ptr;
+	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
+	sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
+	__flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
+	__flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));
+
+	return 0;
+}
+early_initcall(cpu_suspend_init);
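Putting the pieces together, the end-to-end flow implemented by this patch looks roughly like this (a sketch of the call chain; the details live in sleep.S and suspend.c above):

	/*
	 * cpu_suspend(arg)                      C entry point, suspend.c
	 *   __cpu_suspend(arg)                  sleep.S: save callee regs,
	 *                                       carve cpu_suspend_ctx off
	 *                                       the stack
	 *     __cpu_suspend_finisher(arg, ctx, save_ptr)
	 *       cpu_do_suspend(ctx)             save system registers
	 *       __flush_dcache_area(...)        push context + pointer to DRAM
	 *       cpu_ops[cpu]->cpu_suspend(arg)  back-end powers the CPU down
	 *
	 * --- warm boot ---
	 *
	 * cpu_resume                            sleep.S, MMU off: hash
	 *                                       MPIDR_EL1, fetch context
	 *                                       phys addr from the stash
	 *   cpu_do_resume                       restore system registers
	 *   cpu_resume_mmu / cpu_resume_after_mmu
	 *                                       re-enable the MMU and return
	 *                                       0, so the original
	 *                                       cpu_suspend() call returns
	 */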