author		Magnus Damm <damm@opensource.se>	2011-09-25 17:18:42 -0400
committer	Rafael J. Wysocki <rjw@sisk.pl>	2011-09-25 17:18:42 -0400
commit		06b841666a5a47918d31472dd77837906f999a9a (patch)
tree		9afdaad5000daff16b995959fc2458ba338567a2 /arch
parent		0aa2a221696cc8ea20a4cdca01315d3b6b4ecc4d (diff)
ARM: mach-shmobile: sh7372 generic suspend/resume support
Convert the sh7372 Core Standby code to make use of the new generic
ARM cpu suspend/resume code.

Signed-off-by: Magnus Damm <damm@opensource.se>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
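For orientation, the generic ARM API adopted here is declared in asm/suspend.h, with the cpu_resume entry point provided by arch/arm/kernel/sleep.S. The sketch below illustrates the contract as this patch uses it; my_finisher and my_enter_lowpower are hypothetical names, not part of the patch:

	#include <asm/proc-fns.h>	/* cpu_do_idle() */
	#include <asm/suspend.h>	/* cpu_suspend() */

	/* Hypothetical "finisher": called by cpu_suspend() after the
	 * core state has been saved.  Returning from it means the CPU
	 * never lost power. */
	static int my_finisher(unsigned long arg)
	{
		cpu_do_idle();	/* e.g. WFI */
		return 0;
	}

	static void my_enter_lowpower(void)
	{
		/* The platform must steer the wakeup/reset path to the
		 * generic cpu_resume, which restores the saved state
		 * and makes cpu_suspend() return normally. */
		cpu_suspend(0, my_finisher);
	}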
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mach-shmobile/include/mach/common.h	3
-rw-r--r--	arch/arm/mach-shmobile/pm-sh7372.c		36
-rw-r--r--	arch/arm/mach-shmobile/sleep-sh7372.S		230
3 files changed, 23 insertions, 246 deletions
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index 06aecb31d9c7..7b6f6f91da81 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -35,8 +35,7 @@ extern void sh7372_add_standard_devices(void);
 extern void sh7372_clock_init(void);
 extern void sh7372_pinmux_init(void);
 extern void sh7372_pm_init(void);
-extern void sh7372_cpu_suspend(void);
-extern void sh7372_cpu_resume(void);
+extern void sh7372_resume_core_standby(void);
 extern struct clk sh7372_extal1_clk;
 extern struct clk sh7372_extal2_clk;
 
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index ac47bfcd287e..aa7d352920d4 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -21,6 +21,7 @@
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/tlbflush.h>
+#include <asm/suspend.h>
 #include <mach/common.h>
 #include <mach/sh7372.h>
 
@@ -152,30 +153,25 @@ struct sh7372_pm_domain sh7372_a3sg = {
 
 #endif /* CONFIG_PM */
 
-static void sh7372_enter_core_standby(void)
+static int sh7372_do_idle_core_standby(unsigned long unused)
 {
-	void __iomem *smfram = (void __iomem *)SMFRAM;
-
-	__raw_writel(0, APARMBAREA); /* translate 4k */
-	__raw_writel(__pa(sh7372_cpu_resume), SBAR); /* set reset vector */
-	__raw_writel(0x10, SYSTBCR); /* enable core standby */
-
-	__raw_writel(0, smfram + 0x3c); /* clear page table address */
-
-	sh7372_cpu_suspend();
-	cpu_init();
+	cpu_do_idle(); /* WFI when SYSTBCR == 0x10 -> Core Standby */
+	return 0;
+}
 
-	/* if page table address is non-NULL then we have been powered down */
-	if (__raw_readl(smfram + 0x3c)) {
-		__raw_writel(__raw_readl(smfram + 0x40),
-			     __va(__raw_readl(smfram + 0x3c)));
+static void sh7372_enter_core_standby(void)
+{
+	/* set reset vector, translate 4k */
+	__raw_writel(__pa(sh7372_resume_core_standby), SBAR);
+	__raw_writel(0, APARMBAREA);
 
-		flush_tlb_all();
-		set_cr(__raw_readl(smfram + 0x38));
-	}
+	/* enter sleep mode with SYSTBCR to 0x10 */
+	__raw_writel(0x10, SYSTBCR);
+	cpu_suspend(0, sh7372_do_idle_core_standby);
+	__raw_writel(0, SYSTBCR);
 
-	__raw_writel(0, SYSTBCR); /* disable core standby */
-	__raw_writel(0, SBAR); /* disable reset vector translation */
+	/* disable reset vector translation */
+	__raw_writel(0, SBAR);
 }
 
 #ifdef CONFIG_CPU_IDLE
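To restate the control flow of the new sh7372_enter_core_standby(), the cpu_suspend() call has two possible exits; the annotated snippet below is a hedged summary, not additional patch code:

	/*
	 * Exit 1 (shallow): the WFI in sh7372_do_idle_core_standby()
	 * falls through on a wakeup interrupt, the finisher returns 0,
	 * and cpu_suspend() returns with all CPU state intact.
	 *
	 * Exit 2 (Core Standby power-down): the CPU restarts at the
	 * physical address programmed into SBAR, i.e. at
	 * sh7372_resume_core_standby, which jumps to the generic
	 * cpu_resume; that restores the state saved by cpu_suspend(),
	 * which then returns exactly as in exit 1.
	 */
	cpu_suspend(0, sh7372_do_idle_core_standby);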
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
index d37d3ca4d18f..dedf6126789c 100644
--- a/arch/arm/mach-shmobile/sleep-sh7372.S
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -30,231 +30,13 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/memory.h>
 #include <asm/assembler.h>
 
-#define SMFRAM 0xe6a70000
-
-	.align
-kernel_flush:
-	.word	v7_flush_dcache_all
-
-	.align	3
-ENTRY(sh7372_cpu_suspend)
-	stmfd	sp!, {r0-r12, lr}	@ save registers on stack
-
-	ldr	r8, =SMFRAM
-
-	mov	r4, sp			@ Store sp
-	mrs	r5, spsr		@ Store spsr
-	mov	r6, lr			@ Store lr
-	stmia	r8!, {r4-r6}
-
-	mrc	p15, 0, r4, c1, c0, 2	@ Coprocessor access control register
-	mrc	p15, 0, r5, c2, c0, 0	@ TTBR0
-	mrc	p15, 0, r6, c2, c0, 1	@ TTBR1
-	mrc	p15, 0, r7, c2, c0, 2	@ TTBCR
-	stmia	r8!, {r4-r7}
-
-	mrc	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
-	mrc	p15, 0, r5, c10, c2, 0	@ PRRR
-	mrc	p15, 0, r6, c10, c2, 1	@ NMRR
-	stmia	r8!,{r4-r6}
-
-	mrc	p15, 0, r4, c13, c0, 1	@ Context ID
-	mrc	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
-	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
-	mrs	r7, cpsr		@ Store current cpsr
-	stmia	r8!, {r4-r7}
-
-	mrc	p15, 0, r4, c1, c0, 0	@ save control register
-	stmia	r8!, {r4}
-
-	/*
-	 * jump out to kernel flush routine
-	 *  - reuse that code is better
-	 *  - it executes in a cached space so is faster than refetch per-block
-	 *  - should be faster and will change with kernel
-	 *  - 'might' have to copy address, load and jump to it
-	 * Flush all data from the L1 data cache before disabling
-	 * SCTLR.C bit.
-	 */
-	ldr	r1, kernel_flush
-	mov	lr, pc
-	bx	r1
-
-	/*
-	 * Clear the SCTLR.C bit to prevent further data cache
-	 * allocation. Clearing SCTLR.C would make all the data accesses
-	 * strongly ordered and would not hit the cache.
-	 */
-	mrc	p15, 0, r0, c1, c0, 0
-	bic	r0, r0, #(1 << 2)	@ Disable the C bit
-	mcr	p15, 0, r0, c1, c0, 0
-	isb
-
-	/*
-	 * Invalidate L1 data cache. Even though only invalidate is
-	 * necessary exported flush API is used here. Doing clean
-	 * on already clean cache would be almost NOP.
-	 */
-	ldr	r1, kernel_flush
-	blx	r1
-	/*
-	 * The kernel doesn't interwork: v7_flush_dcache_all in particular will
-	 * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
-	 * This sequence switches back to ARM.  Note that .align may insert a
-	 * nop: bx pc needs to be word-aligned in order to work.
-	 */
-THUMB(	.thumb	)
-THUMB(	.align	)
-THUMB(	bx	pc	)
-THUMB(	nop	)
-	.arm
-
-	/* Data memory barrier and Data sync barrier */
-	dsb
-	dmb
-
-/*
- * ===================================
- * == WFI instruction => Enter idle ==
- * ===================================
- */
-	wfi	@ wait for interrupt
-
-/*
- * ===================================
- * == Resume path for non-OFF modes ==
- * ===================================
- */
-	mrc	p15, 0, r0, c1, c0, 0
-	tst	r0, #(1 << 2)		@ Check C bit enabled?
-	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
-	mcreq	p15, 0, r0, c1, c0, 0
-	isb
-
-/*
- * ===================================
- * == Exit point from non-OFF modes ==
- * ===================================
- */
-	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
-
-	.pool
-
 	.align	12
 	.text
-	.global	sh7372_cpu_resume
-sh7372_cpu_resume:
-
-	mov	r1, #0
+	.global	sh7372_resume_core_standby
+sh7372_resume_core_standby:
+	ldr	pc, 1f
+1:	.long	cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET
-	/*
-	 * Invalidate all instruction caches to PoU
-	 * and flush branch target cache
-	 */
-	mcr	p15, 0, r1, c7, c5, 0
-
-	ldr	r3, =SMFRAM
-
-	ldmia	r3!, {r4-r6}
-	mov	sp, r4			@ Restore sp
-	msr	spsr_cxsf, r5		@ Restore spsr
-	mov	lr, r6			@ Restore lr
-
-	ldmia	r3!, {r4-r7}
-	mcr	p15, 0, r4, c1, c0, 2	@ Coprocessor access Control Register
-	mcr	p15, 0, r5, c2, c0, 0	@ TTBR0
-	mcr	p15, 0, r6, c2, c0, 1	@ TTBR1
-	mcr	p15, 0, r7, c2, c0, 2	@ TTBCR
-
-	ldmia	r3!,{r4-r6}
-	mcr	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
-	mcr	p15, 0, r5, c10, c2, 0	@ PRRR
-	mcr	p15, 0, r6, c10, c2, 1	@ NMRR
-
-	ldmia	r3!,{r4-r7}
-	mcr	p15, 0, r4, c13, c0, 1	@ Context ID
-	mcr	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
-	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
-	msr	cpsr, r7		@ store cpsr
-
-	/* Starting to enable MMU here */
-	mrc	p15, 0, r7, c2, c0, 2	@ Read TTBRControl
-	/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
-	and	r7, #0x7
-	cmp	r7, #0x0
-	beq	usettbr0
-ttbr_error:
-	/*
-	 * More work needs to be done to support N[0:2] value other than 0
-	 * So looping here so that the error can be detected
-	 */
-	b	ttbr_error
-
-	.align
-cache_pred_disable_mask:
-	.word	0xFFFFE7FB
-ttbrbit_mask:
-	.word	0xFFFFC000
-table_index_mask:
-	.word	0xFFF00000
-table_entry:
-	.word	0x00000C02
-usettbr0:
-
-	mrc	p15, 0, r2, c2, c0, 0
-	ldr	r5, ttbrbit_mask
-	and	r2, r5
-	mov	r4, pc
-	ldr	r5, table_index_mask
-	and	r4, r5			@ r4 = 31 to 20 bits of pc
-	/* Extract the value to be written to table entry */
-	ldr	r6, table_entry
-	/* r6 has the value to be written to table entry */
-	add	r6, r6, r4
-	/* Getting the address of table entry to modify */
-	lsr	r4, #18
-	/* r2 has the location which needs to be modified */
-	add	r2, r4
-	ldr	r4, [r2]
-	str	r6, [r2] /* modify the table entry */
-
-	mov	r7, r6
-	mov	r5, r2
-	mov	r6, r4
-	/* r5 = original page table address */
-	/* r6 = original page table data */
-
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c5, 4	@ Flush prefetch buffer
-	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate branch predictor array
-	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate instruction TLB
-	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate data TLB
-
-	/*
-	 * Restore control register. This enables the MMU.
-	 * The caches and prediction are not enabled here, they
-	 * will be enabled after restoring the MMU table entry.
-	 */
-	ldmia	r3!, {r4}
-	stmia	r3!, {r5} /* save original page table address */
-	stmia	r3!, {r6} /* save original page table data */
-	stmia	r3!, {r7} /* save modified page table data */
-
-	ldr	r2, cache_pred_disable_mask
-	and	r4, r2
-	mcr	p15, 0, r4, c1, c0, 0
-	dsb
-	isb
-
-	ldr	r0, =restoremmu_on
-	bx	r0
-
-/*
- * ==============================
- * == Exit point from OFF mode ==
- * ==============================
- */
-restoremmu_on:
-
-	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
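A note on the new trampoline: it runs from the reset vector with the MMU off, so it cannot branch to cpu_resume by its link-time (virtual) address. The expression cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET performs the standard ARM linear-map translation at assembly time. A C analogue of the same arithmetic, assuming the usual linear mapping (example_virt_to_phys is a hypothetical name; it matches what __pa() computes on such machines):

	#include <asm/memory.h>	/* PAGE_OFFSET, PLAT_PHYS_OFFSET */

	/* Translate a kernel linear-map virtual address to physical,
	 * mirroring the .long arithmetic in the trampoline above. */
	static inline unsigned long example_virt_to_phys(void *vaddr)
	{
		return (unsigned long)vaddr - PAGE_OFFSET + PLAT_PHYS_OFFSET;
	}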