Diffstat (limited to 'arch/arm/mach-shmobile/sleep-sh7372.S')
-rw-r--r--	arch/arm/mach-shmobile/sleep-sh7372.S | 221
1 file changed, 28 insertions(+), 193 deletions(-)
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
index d37d3ca4d18f..f3ab3c5810ea 100644
--- a/arch/arm/mach-shmobile/sleep-sh7372.S
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -30,58 +30,20 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/memory.h>
 #include <asm/assembler.h>
 
-#define SMFRAM 0xe6a70000
-
-	.align
-kernel_flush:
-	.word	v7_flush_dcache_all
-
-	.align	3
-ENTRY(sh7372_cpu_suspend)
-	stmfd	sp!, {r0-r12, lr}	@ save registers on stack
-
-	ldr	r8, =SMFRAM
-
-	mov	r4, sp			@ Store sp
-	mrs	r5, spsr		@ Store spsr
-	mov	r6, lr			@ Store lr
-	stmia	r8!, {r4-r6}
-
-	mrc	p15, 0, r4, c1, c0, 2	@ Coprocessor access control register
-	mrc	p15, 0, r5, c2, c0, 0	@ TTBR0
-	mrc	p15, 0, r6, c2, c0, 1	@ TTBR1
-	mrc	p15, 0, r7, c2, c0, 2	@ TTBCR
-	stmia	r8!, {r4-r7}
-
-	mrc	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
-	mrc	p15, 0, r5, c10, c2, 0	@ PRRR
-	mrc	p15, 0, r6, c10, c2, 1	@ NMRR
-	stmia	r8!, {r4-r6}
-
-	mrc	p15, 0, r4, c13, c0, 1	@ Context ID
-	mrc	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
-	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
-	mrs	r7, cpsr		@ Store current cpsr
-	stmia	r8!, {r4-r7}
-
-	mrc	p15, 0, r4, c1, c0, 0	@ save control register
-	stmia	r8!, {r4}
-
-	/*
-	 * jump out to kernel flush routine
-	 *  - reuse that code is better
-	 *  - it executes in a cached space so is faster than refetch per-block
-	 *  - should be faster and will change with kernel
-	 *  - 'might' have to copy address, load and jump to it
-	 * Flush all data from the L1 data cache before disabling
-	 * SCTLR.C bit.
-	 */
-	ldr	r1, kernel_flush
-	mov	lr, pc
-	bx	r1
+#if defined(CONFIG_SUSPEND) || defined(CONFIG_CPU_IDLE)
+	.align	12
+	.text
+	.global	sh7372_resume_core_standby_a3sm
+sh7372_resume_core_standby_a3sm:
+	ldr	pc, 1f
+1:	.long	cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET
 
+	.global	sh7372_do_idle_a3sm
+sh7372_do_idle_a3sm:
 	/*
 	 * Clear the SCTLR.C bit to prevent further data cache
 	 * allocation. Clearing SCTLR.C would make all the data accesses
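[Note] The new sh7372_resume_core_standby_a3sm entry point in the hunk above runs with the MMU off, so it cannot branch to cpu_resume by its virtual address; the `.long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET` literal translates the linked virtual address into the physical one the CPU must jump to. A minimal C sketch of that address math; the PAGE_OFFSET/PLAT_PHYS_OFFSET values and the cpu_resume address are illustrative assumptions, not taken from this patch:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_OFFSET      0xc0000000UL /* assumed kernel virtual base */
	#define PLAT_PHYS_OFFSET 0x40000000UL /* assumed RAM physical base */

	/* Same arithmetic as the trampoline literal:
	 * cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET */
	static uintptr_t virt_to_phys_sketch(uintptr_t virt)
	{
		return virt - PAGE_OFFSET + PLAT_PHYS_OFFSET;
	}

	int main(void)
	{
		uintptr_t cpu_resume_virt = 0xc0008200UL; /* hypothetical address */

		printf("resume trampoline target: %#lx\n",
		       (unsigned long)virt_to_phys_sketch(cpu_resume_virt));
		return 0;
	}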
@@ -92,10 +54,13 @@ ENTRY(sh7372_cpu_suspend)
 	mcr	p15, 0, r0, c1, c0, 0
 	isb
 
+	/* disable L2 cache in the aux control register */
+	mrc	p15, 0, r10, c1, c0, 1
+	bic	r10, r10, #2
+	mcr	p15, 0, r10, c1, c0, 1
+
 	/*
-	 * Invalidate L1 data cache. Even though only invalidate is
-	 * necessary exported flush API is used here. Doing clean
-	 * on already clean cache would be almost NOP.
+	 * Invalidate data cache again.
 	 */
 	ldr	r1, kernel_flush
 	blx	r1
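[Note] The hunk above first clears SCTLR.C to stop further D-cache allocation, then clears bit 1 of the auxiliary control register (`bic r10, r10, #2`), which the patch's own comment identifies as the L2 cache enable. A hedged C sketch of the same bit manipulation; the mask names and sample register values are mine, and the real code reads/writes the registers with mrc/mcr rather than plain variables:

	#include <stdint.h>
	#include <stdio.h>

	#define SCTLR_C  (1u << 2) /* SCTLR.C: data cache enable */
	#define ACTLR_L2 (1u << 1) /* aux control bit cleared by "bic r10, r10, #2" */

	/* Stop further D-cache allocation. */
	static uint32_t disable_dcache(uint32_t sctlr)
	{
		return sctlr & ~SCTLR_C;
	}

	/* Disable L2 before powering the domain down. */
	static uint32_t disable_l2(uint32_t actlr)
	{
		return actlr & ~ACTLR_L2;
	}

	int main(void)
	{
		uint32_t sctlr = 0x00c5187du; /* hypothetical value, C bit set */
		uint32_t actlr = 0x00000042u; /* hypothetical value, L2 bit set */

		printf("SCTLR %#x -> %#x\n", sctlr, disable_dcache(sctlr));
		printf("ACTLR %#x -> %#x\n", actlr, disable_l2(actlr));
		return 0;
	}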
@@ -115,146 +80,16 @@ ENTRY(sh7372_cpu_suspend)
 	dsb
 	dmb
 
-/*
- * ===================================
- * == WFI instruction => Enter idle ==
- * ===================================
- */
-	wfi				@ wait for interrupt
-
-/*
- * ===================================
- * == Resume path for non-OFF modes ==
- * ===================================
- */
-	mrc	p15, 0, r0, c1, c0, 0
-	tst	r0, #(1 << 2)		@ Check C bit enabled?
-	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
-	mcreq	p15, 0, r0, c1, c0, 0
-	isb
-
-/*
- * ===================================
- * == Exit point from non-OFF modes ==
- * ===================================
- */
-	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
+#define SPDCR 0xe6180008
+#define A3SM (1 << 12)
 
-	.pool
+	/* A3SM power down */
+	ldr	r0, =SPDCR
+	ldr	r1, =A3SM
+	str	r1, [r0]
+1:
+	b	1b
 
-	.align	12
-	.text
-	.global	sh7372_cpu_resume
-sh7372_cpu_resume:
-
-	mov	r1, #0
-	/*
-	 * Invalidate all instruction caches to PoU
-	 * and flush branch target cache
-	 */
-	mcr	p15, 0, r1, c7, c5, 0
-
-	ldr	r3, =SMFRAM
-
-	ldmia	r3!, {r4-r6}
-	mov	sp, r4			@ Restore sp
-	msr	spsr_cxsf, r5		@ Restore spsr
-	mov	lr, r6			@ Restore lr
-
-	ldmia	r3!, {r4-r7}
-	mcr	p15, 0, r4, c1, c0, 2	@ Coprocessor access Control Register
-	mcr	p15, 0, r5, c2, c0, 0	@ TTBR0
-	mcr	p15, 0, r6, c2, c0, 1	@ TTBR1
-	mcr	p15, 0, r7, c2, c0, 2	@ TTBCR
-
-	ldmia	r3!, {r4-r6}
-	mcr	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
-	mcr	p15, 0, r5, c10, c2, 0	@ PRRR
-	mcr	p15, 0, r6, c10, c2, 1	@ NMRR
-
-	ldmia	r3!, {r4-r7}
-	mcr	p15, 0, r4, c13, c0, 1	@ Context ID
-	mcr	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
-	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
-	msr	cpsr, r7		@ store cpsr
-
-	/* Starting to enable MMU here */
-	mrc	p15, 0, r7, c2, c0, 2	@ Read TTBRControl
-	/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
-	and	r7, #0x7
-	cmp	r7, #0x0
-	beq	usettbr0
-ttbr_error:
-	/*
-	 * More work needs to be done to support N[0:2] value other than 0
-	 * So looping here so that the error can be detected
-	 */
-	b	ttbr_error
-
-	.align
-cache_pred_disable_mask:
-	.word	0xFFFFE7FB
-ttbrbit_mask:
-	.word	0xFFFFC000
-table_index_mask:
-	.word	0xFFF00000
-table_entry:
-	.word	0x00000C02
-usettbr0:
-
-	mrc	p15, 0, r2, c2, c0, 0
-	ldr	r5, ttbrbit_mask
-	and	r2, r5
-	mov	r4, pc
-	ldr	r5, table_index_mask
-	and	r4, r5			@ r4 = 31 to 20 bits of pc
-	/* Extract the value to be written to table entry */
-	ldr	r6, table_entry
-	/* r6 has the value to be written to table entry */
-	add	r6, r6, r4
-	/* Getting the address of table entry to modify */
-	lsr	r4, #18
-	/* r2 has the location which needs to be modified */
-	add	r2, r4
-	ldr	r4, [r2]
-	str	r6, [r2]		/* modify the table entry */
-
-	mov	r7, r6
-	mov	r5, r2
-	mov	r6, r4
-	/* r5 = original page table address */
-	/* r6 = original page table data */
-
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c5, 4	@ Flush prefetch buffer
-	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate branch predictor array
-	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate instruction TLB
-	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate data TLB
-
-	/*
-	 * Restore control register. This enables the MMU.
-	 * The caches and prediction are not enabled here, they
-	 * will be enabled after restoring the MMU table entry.
-	 */
-	ldmia	r3!, {r4}
-	stmia	r3!, {r5}		/* save original page table address */
-	stmia	r3!, {r6}		/* save original page table data */
-	stmia	r3!, {r7}		/* save modified page table data */
-
-	ldr	r2, cache_pred_disable_mask
-	and	r4, r2
-	mcr	p15, 0, r4, c1, c0, 0
-	dsb
-	isb
-
-	ldr	r0, =restoremmu_on
-	bx	r0
-
-/*
- * ==============================
- * == Exit point from OFF mode ==
- * ==============================
- */
-restoremmu_on:
-
-	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
+kernel_flush:
+	.word	v7_flush_dcache_all
+#endif
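[Note] The final hunk replaces the old hand-rolled wfi/register-restore path with an A3SM power-down request: the A3SM bit is written to the SPDCR register and the CPU spins until the power domain is cut, with resume re-entering through the sh7372_resume_core_standby_a3sm trampoline above. A C sketch of the same sequence; the address and bit value come from the patch's #defines, the function name is mine, and the MMIO write only makes sense on actual sh7372 hardware:

	#include <stdint.h>

	#define SPDCR ((volatile uint32_t *)0xe6180008) /* from the patch */
	#define A3SM  ((uint32_t)1 << 12)               /* from the patch */

	static void a3sm_power_down(void)
	{
		*SPDCR = A3SM;	/* the "ldr/ldr/str" triple: request A3SM power down */
		for (;;)	/* the "1: b 1b" loop: spin until power is cut */
			;
	}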