aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLorenzo Pieralisi <lorenzo.pieralisi@arm.com>2013-07-17 05:14:45 -0400
committerLorenzo Pieralisi <lorenzo.pieralisi@arm.com>2013-12-16 12:17:31 -0500
commit6732bc65c277b697f6d8b645b15f63d1558c0cc4 (patch)
tree3fec2f68e25807aea9f814520541211cc8ff979a
parent976d7d3f79a997b223f2ed8eabef7e12e469b5cf (diff)
arm64: kernel: suspend/resume registers save/restore
Power management software requires the kernel to save and restore CPU registers while going through suspend and resume operations triggered by kernel subsystems like CPU idle and suspend to RAM. This patch implements code that provides save and restore mechanism for the arm v8 implementation. Memory for the context is passed as parameter to both cpu_do_suspend and cpu_do_resume functions, and allows the callers to implement context allocation as they deem fit. The registers that are saved and restored correspond to the registers set actually required by the kernel to be up and running which represents a subset of v8 ISA. Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
-rw-r--r--arch/arm64/include/asm/proc-fns.h3
-rw-r--r--arch/arm64/include/asm/suspend.h18
-rw-r--r--arch/arm64/mm/proc.S69
3 files changed, 90 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 7cdf466fd0c5..0c657bb54597 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -26,11 +26,14 @@
26#include <asm/page.h> 26#include <asm/page.h>
27 27
28struct mm_struct; 28struct mm_struct;
29struct cpu_suspend_ctx;
29 30
30extern void cpu_cache_off(void); 31extern void cpu_cache_off(void);
31extern void cpu_do_idle(void); 32extern void cpu_do_idle(void);
32extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); 33extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
33extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); 34extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
35extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
36extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
34 37
35#include <asm/memory.h> 38#include <asm/memory.h>
36 39
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
new file mode 100644
index 000000000000..a88558e223da
--- /dev/null
+++ b/arch/arm64/include/asm/suspend.h
@@ -0,0 +1,18 @@
1#ifndef __ASM_SUSPEND_H
2#define __ASM_SUSPEND_H
3
4#define NR_CTX_REGS 11	/* number of system registers saved in ctx_regs[]; matches the mrs/stp sequence in cpu_do_suspend */
5
6/*
7 * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
8 * the stack, which must be 16-byte aligned on v8
9 */
10struct cpu_suspend_ctx {
11	/*
12	 * This struct must be kept in sync with
13	 * cpu_do_{suspend/resume} in mm/proc.S
14	 */
15	u64 ctx_regs[NR_CTX_REGS];	/* EL0/EL1 system register values, in the exact store order used by cpu_do_suspend */
16	u64 sp;	/* stack pointer of the suspending context; saved/restored by the caller of cpu_do_{suspend,resume} */
17} __aligned(16);	/* NOTE(review): header uses u64 without an explicit include — presumably relies on the includer providing <linux/types.h>; confirm */
18#endif
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 0f7fec52c7f8..bed1f1de1caf 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -80,6 +80,75 @@ ENTRY(cpu_do_idle)
80 ret 80 ret
81ENDPROC(cpu_do_idle) 81ENDPROC(cpu_do_idle)
82 82
83#ifdef CONFIG_ARM64_CPU_SUSPEND
84/**
85 * cpu_do_suspend - save CPU registers context
86 *
87 * x0: virtual address of context pointer
88 *
89 * Saves exactly NR_CTX_REGS (11) system registers into ctx_regs[] of a
90 * struct cpu_suspend_ctx. The register order and byte offsets below are
91 * ABI with cpu_do_resume and with struct cpu_suspend_ctx — keep all three
92 * in sync.
93 */
89ENTRY(cpu_do_suspend)
90	mrs	x2, tpidr_el0		// EL0 read/write software thread ID
91	mrs	x3, tpidrro_el0		// EL0 read-only software thread ID
92	mrs	x4, contextidr_el1	// context ID (ASID/PID for debug/trace)
93	mrs	x5, mair_el1		// memory attribute indirection
94	mrs	x6, cpacr_el1		// architectural feature access control (FP/SIMD traps)
95	mrs	x7, ttbr1_el1		// kernel translation table base
96	mrs	x8, tcr_el1		// translation control
97	mrs	x9, vbar_el1		// exception vector base
98	mrs	x10, mdscr_el1		// monitor debug system control
99	mrs	x11, oslsr_el1		// OS lock status (restored via oslar_el1 on resume)
100	mrs	x12, sctlr_el1		// system control (MMU/caches enable bits)
101	stp	x2, x3, [x0]		// ctx_regs[0..1]
102	stp	x4, x5, [x0, #16]	// ctx_regs[2..3]
103	stp	x6, x7, [x0, #32]	// ctx_regs[4..5]
104	stp	x8, x9, [x0, #48]	// ctx_regs[6..7]
105	stp	x10, x11, [x0, #64]	// ctx_regs[8..9]
106	str	x12, [x0, #80]		// ctx_regs[10] — sctlr_el1, read back last by resume
107	ret
108ENDPROC(cpu_do_suspend)
109
110/**
111 * cpu_do_resume - restore CPU register context
112 *
113 * x0: Physical address of context pointer
114 * x1: ttbr0_el1 to be restored
115 *
116 * Returns:
117 * sctlr_el1 value in x0
118 *
119 * Runs with the MMU off — hence the context pointer is a PHYSICAL
120 * address, unlike the virtual address taken by cpu_do_suspend. The
121 * restore order mirrors the save order in cpu_do_suspend; sctlr_el1 is
122 * not written here but returned in x0 so the caller can enable the MMU
123 * itself at the right point.
124 */
119ENTRY(cpu_do_resume)
120	/*
121	 * Invalidate local tlb entries before turning on MMU
122	 */
123	tlbi	vmalle1			// local (this CPU) EL1 TLB invalidate; completed by the dsb below
124	ldp	x2, x3, [x0]		// ctx_regs[0..1]: tpidr_el0, tpidrro_el0
125	ldp	x4, x5, [x0, #16]	// ctx_regs[2..3]: contextidr_el1, mair_el1
126	ldp	x6, x7, [x0, #32]	// ctx_regs[4..5]: cpacr_el1, ttbr1_el1
127	ldp	x8, x9, [x0, #48]	// ctx_regs[6..7]: tcr_el1, vbar_el1
128	ldp	x10, x11, [x0, #64]	// ctx_regs[8..9]: mdscr_el1, oslsr_el1
129	ldr	x12, [x0, #80]		// ctx_regs[10]: sctlr_el1 (returned, not written)
130	msr	tpidr_el0, x2
131	msr	tpidrro_el0, x3
132	msr	contextidr_el1, x4
133	msr	mair_el1, x5
134	msr	cpacr_el1, x6
135	msr	ttbr0_el1, x1		// caller-supplied user/idmap page table base
136	msr	ttbr1_el1, x7
137	msr	tcr_el1, x8
138	msr	vbar_el1, x9
139	msr	mdscr_el1, x10
140	/*
141	 * Restore oslsr_el1 by writing oslar_el1
142	 * (oslsr_el1 is read-only; bit 1 is the OS lock status, which is
143	 * re-established through the oslar_el1 lock access register)
144	 */
145	ubfx	x11, x11, #1, #1	// extract saved OS lock bit (oslsr_el1[1])
146	msr	oslar_el1, x11
147	mov	x0, x12			// return saved sctlr_el1 to the caller
148	dsb	nsh			// Make sure local tlb invalidation completed
149	isb				// synchronize the system register writes above
150	ret
151ENDPROC(cpu_do_resume)
152#endif
150#endif
151
83/* 152/*
84 * cpu_switch_mm(pgd_phys, tsk) 153 * cpu_switch_mm(pgd_phys, tsk)
85 * 154 *