Diffstat (limited to 'arch/arm/mach-shmobile/sleep-sh7372.S')
-rw-r--r--  arch/arm/mach-shmobile/sleep-sh7372.S  230
1 file changed, 6 insertions(+), 224 deletions(-)
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
index d37d3ca4d18f..dedf6126789c 100644
--- a/arch/arm/mach-shmobile/sleep-sh7372.S
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -30,231 +30,13 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/memory.h>
 #include <asm/assembler.h>
 
-#define SMFRAM 0xe6a70000
-
-	.align
-kernel_flush:
-	.word	v7_flush_dcache_all
-
-	.align	3
-ENTRY(sh7372_cpu_suspend)
-	stmfd	sp!, {r0-r12, lr}	@ save registers on stack
-
-	ldr	r8, =SMFRAM
-
-	mov	r4, sp			@ Store sp
-	mrs	r5, spsr		@ Store spsr
-	mov	r6, lr			@ Store lr
-	stmia	r8!, {r4-r6}
-
-	mrc	p15, 0, r4, c1, c0, 2	@ Coprocessor access control register
-	mrc	p15, 0, r5, c2, c0, 0	@ TTBR0
-	mrc	p15, 0, r6, c2, c0, 1	@ TTBR1
-	mrc	p15, 0, r7, c2, c0, 2	@ TTBCR
-	stmia	r8!, {r4-r7}
-
-	mrc	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
-	mrc	p15, 0, r5, c10, c2, 0	@ PRRR
-	mrc	p15, 0, r6, c10, c2, 1	@ NMRR
-	stmia	r8!, {r4-r6}
-
-	mrc	p15, 0, r4, c13, c0, 1	@ Context ID
-	mrc	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
-	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
-	mrs	r7, cpsr		@ Store current cpsr
-	stmia	r8!, {r4-r7}
-
-	mrc	p15, 0, r4, c1, c0, 0	@ save control register
-	stmia	r8!, {r4}
-
-	/*
-	 * jump out to kernel flush routine
-	 *  - reuse that code is better
-	 *  - it executes in a cached space so is faster than refetch per-block
-	 *  - should be faster and will change with kernel
-	 *  - 'might' have to copy address, load and jump to it
-	 * Flush all data from the L1 data cache before disabling
-	 * SCTLR.C bit.
-	 */
-	ldr	r1, kernel_flush
-	mov	lr, pc
-	bx	r1
-
-	/*
-	 * Clear the SCTLR.C bit to prevent further data cache
-	 * allocation. Clearing SCTLR.C would make all the data accesses
-	 * strongly ordered and would not hit the cache.
-	 */
-	mrc	p15, 0, r0, c1, c0, 0
-	bic	r0, r0, #(1 << 2)	@ Disable the C bit
-	mcr	p15, 0, r0, c1, c0, 0
-	isb
-
-	/*
-	 * Invalidate L1 data cache. Even though only invalidate is
-	 * necessary exported flush API is used here. Doing clean
-	 * on already clean cache would be almost NOP.
-	 */
-	ldr	r1, kernel_flush
-	blx	r1
-	/*
-	 * The kernel doesn't interwork: v7_flush_dcache_all in particular will
-	 * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
-	 * This sequence switches back to ARM.  Note that .align may insert a
-	 * nop: bx pc needs to be word-aligned in order to work.
-	 */
-THUMB(	.thumb		)
-THUMB(	.align		)
-THUMB(	bx	pc	)
-THUMB(	nop		)
-	.arm
-
-	/* Data memory barrier and Data sync barrier */
-	dsb
-	dmb
-
-/*
- * ===================================
- * == WFI instruction => Enter idle ==
- * ===================================
- */
-	wfi				@ wait for interrupt
-
-/*
- * ===================================
- * == Resume path for non-OFF modes ==
- * ===================================
- */
-	mrc	p15, 0, r0, c1, c0, 0
-	tst	r0, #(1 << 2)		@ Check C bit enabled?
-	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
-	mcreq	p15, 0, r0, c1, c0, 0
-	isb
-
-/*
- * ===================================
- * == Exit point from non-OFF modes ==
- * ===================================
- */
-	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
-
-	.pool
-
 	.align	12
 	.text
-	.global	sh7372_cpu_resume
-sh7372_cpu_resume:
-
-	mov	r1, #0
-	/*
-	 * Invalidate all instruction caches to PoU
-	 * and flush branch target cache
-	 */
-	mcr	p15, 0, r1, c7, c5, 0
-
-	ldr	r3, =SMFRAM
-
-	ldmia	r3!, {r4-r6}
-	mov	sp, r4			@ Restore sp
-	msr	spsr_cxsf, r5		@ Restore spsr
-	mov	lr, r6			@ Restore lr
-
-	ldmia	r3!, {r4-r7}
-	mcr	p15, 0, r4, c1, c0, 2	@ Coprocessor access Control Register
-	mcr	p15, 0, r5, c2, c0, 0	@ TTBR0
-	mcr	p15, 0, r6, c2, c0, 1	@ TTBR1
-	mcr	p15, 0, r7, c2, c0, 2	@ TTBCR
-
-	ldmia	r3!, {r4-r6}
-	mcr	p15, 0, r4, c3, c0, 0	@ Domain access Control Register
-	mcr	p15, 0, r5, c10, c2, 0	@ PRRR
-	mcr	p15, 0, r6, c10, c2, 1	@ NMRR
-
-	ldmia	r3!, {r4-r7}
-	mcr	p15, 0, r4, c13, c0, 1	@ Context ID
-	mcr	p15, 0, r5, c13, c0, 2	@ User r/w thread and process ID
-	mrc	p15, 0, r6, c12, c0, 0	@ Secure or NS vector base address
-	msr	cpsr, r7		@ store cpsr
-
-	/* Starting to enable MMU here */
-	mrc	p15, 0, r7, c2, c0, 2	@ Read TTBRControl
-	/* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
-	and	r7, #0x7
-	cmp	r7, #0x0
-	beq	usettbr0
-ttbr_error:
-	/*
-	 * More work needs to be done to support N[0:2] value other than 0
-	 * So looping here so that the error can be detected
-	 */
-	b	ttbr_error
-
-	.align
-cache_pred_disable_mask:
-	.word	0xFFFFE7FB
-ttbrbit_mask:
-	.word	0xFFFFC000
-table_index_mask:
-	.word	0xFFF00000
-table_entry:
-	.word	0x00000C02
-usettbr0:
-
-	mrc	p15, 0, r2, c2, c0, 0
-	ldr	r5, ttbrbit_mask
-	and	r2, r5
-	mov	r4, pc
-	ldr	r5, table_index_mask
-	and	r4, r5			@ r4 = 31 to 20 bits of pc
-	/* Extract the value to be written to table entry */
-	ldr	r6, table_entry
-	/* r6 has the value to be written to table entry */
-	add	r6, r6, r4
-	/* Getting the address of table entry to modify */
-	lsr	r4, #18
-	/* r2 has the location which needs to be modified */
-	add	r2, r4
-	ldr	r4, [r2]
-	str	r6, [r2]		/* modify the table entry */
-
-	mov	r7, r6
-	mov	r5, r2
-	mov	r6, r4
-	/* r5 = original page table address */
-	/* r6 = original page table data */
-
-	mov	r0, #0
-	mcr	p15, 0, r0, c7, c5, 4	@ Flush prefetch buffer
-	mcr	p15, 0, r0, c7, c5, 6	@ Invalidate branch predictor array
-	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate instruction TLB
-	mcr	p15, 0, r0, c8, c6, 0	@ Invalidate data TLB
-
-	/*
-	 * Restore control register. This enables the MMU.
-	 * The caches and prediction are not enabled here, they
-	 * will be enabled after restoring the MMU table entry.
-	 */
-	ldmia	r3!, {r4}
-	stmia	r3!, {r5}		/* save original page table address */
-	stmia	r3!, {r6}		/* save original page table data */
-	stmia	r3!, {r7}		/* save modified page table data */
-
-	ldr	r2, cache_pred_disable_mask
-	and	r4, r2
-	mcr	p15, 0, r4, c1, c0, 0
-	dsb
-	isb
-
-	ldr	r0, =restoremmu_on
-	bx	r0
-
-/*
- * ==============================
- * == Exit point from OFF mode ==
- * ==============================
- */
-restoremmu_on:
-
-	ldmfd	sp!, {r0-r12, pc}	@ restore regs and return
+	.global	sh7372_resume_core_standby
+sh7372_resume_core_standby:
+	ldr	pc, 1f
+1:	.long	cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET
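
Editorial note on the change (not part of the commit itself): the 224 deleted lines are an open-coded CPU context save/restore around WFI; the 6 added lines replace it with the kernel's generic ARM cpu_suspend/cpu_resume path, leaving only a small trampoline. The CPU comes out of Core-Standby with the MMU off, so the stub executes from its physical address and cannot branch to virtual kernel symbols directly: "ldr pc, 1f" loads a pre-computed literal into the program counter, and ".long cpu_resume - PAGE_OFFSET + PLAT_PHYS_OFFSET" converts cpu_resume's link-time virtual address to a physical one by undoing the kernel linear mapping. That arithmetic is why <asm/memory.h> (which provides PAGE_OFFSET and PLAT_PHYS_OFFSET) is now included. The retained ".align 12" keeps the stub aligned to a 4 KiB (2^12-byte) boundary, presumably so its physical address can be programmed cleanly into the resume-vector hardware.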
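
For context, here is a minimal sketch of how platform PM code might use this entry point with the generic cpu_suspend() API. This is an illustration only, not taken from this diff: the SBAR register address and all function names below are assumptions.

/*
 * Sketch: wiring sh7372_resume_core_standby into platform PM code.
 * SBAR (assumed SYSC resume-vector register at 0xe6180020) and the
 * helper names are illustrative assumptions.
 */
#include <linux/io.h>
#include <asm/suspend.h>	/* cpu_suspend() */
#include <asm/proc-fns.h>	/* cpu_do_idle() */
#include <asm/memory.h>		/* __pa() */

#define SBAR IOMEM(0xe6180020)	/* assumed resume-vector register */

extern void sh7372_resume_core_standby(void);

/* Runs after cpu_suspend() has saved the CPU context. */
static int sh7372_do_idle_core_standby(unsigned long unused)
{
	cpu_do_idle();		/* WFI; the core may be powered down here */
	return 0;
}

static void sh7372_enter_core_standby(void)
{
	/* The wakeup hardware needs a physical address: MMU is off then. */
	__raw_writel(__pa(sh7372_resume_core_standby), SBAR);

	/*
	 * Generic ARM suspend path: save context, call the idle function;
	 * on wakeup the trampoline chains to cpu_resume, which restores
	 * context and returns here.
	 */
	cpu_suspend(0, sh7372_do_idle_core_standby);

	__raw_writel(0, SBAR);	/* clear the vector again */
}

The key point is the __pa() conversion: the wakeup hardware jumps to the trampoline, not to cpu_resume itself, and the trampoline then jumps to the physical cpu_resume address stored in its literal pool.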