path: root/arch/arm/mach-omap2/sleep34xx.S
author	Kevin Hilman <khilman@deeprootsystems.com>	2009-05-28 13:56:16 -0400
committer	Kevin Hilman <khilman@deeprootsystems.com>	2009-05-28 13:58:50 -0400
commit	8bd229492209c0c7d050e2f9a600c12f035d72f7 (patch)
tree	c76039df215b1c3ba58adb23845baa4af40f25fe /arch/arm/mach-omap2/sleep34xx.S
parent	a330bd4750bc84aebb28faddd525d0bcbdde262d (diff)
OMAP2/3: PM: push core PM code from linux-omap
This patch syncs the core linux-omap PM code with mainline. This code has evolved and been used for a while in the linux-omap tree, but the attempt here is to finally get it into mainline.

Following this will be a series of patches from the 'PM branch' of the linux-omap tree to add full PM hardware support.

Much of this PM core code was written by Jouni Hogander with significant contributions from Paul Walmsley as well as many others from Nokia, Texas Instruments and the linux-omap community.

Signed-off-by: Jouni Hogander <jouni.hogander@nokia.com>
Cc: Paul Walmsley <paul@pwsan.com>
Signed-off-by: Kevin Hilman <khilman@deeprootsystems.com>
Diffstat (limited to 'arch/arm/mach-omap2/sleep34xx.S')
-rw-r--r--	arch/arm/mach-omap2/sleep34xx.S	436
1 file changed, 436 insertions, 0 deletions
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
new file mode 100644
index 000000000000..e5e2553e79a6
--- /dev/null
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -0,0 +1,436 @@
1/*
2 * linux/arch/arm/mach-omap2/sleep.S
3 *
4 * (C) Copyright 2007
5 * Texas Instruments
6 * Karthik Dasu <karthik-dp@ti.com>
7 *
8 * (C) Copyright 2004
9 * Texas Instruments, <www.ti.com>
10 * Richard Woodruff <r-woodruff2@ti.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License as
14 * published by the Free Software Foundation; either version 2 of
15 * the License, or (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
25 * MA 02111-1307 USA
26 */
27#include <linux/linkage.h>
28#include <asm/assembler.h>
29#include <mach/io.h>
30#include <mach/control.h>
31
32#include "prm.h"
33#include "sdrc.h"
34
35#define PM_PREPWSTST_CORE_V OMAP34XX_PRM_REGADDR(CORE_MOD, \
36 OMAP3430_PM_PREPWSTST)
37#define PM_PREPWSTST_MPU_V OMAP34XX_PRM_REGADDR(MPU_MOD, \
38 OMAP3430_PM_PREPWSTST)
39#define PM_PWSTCTRL_MPU_P OMAP34XX_PRM_REGADDR(MPU_MOD, PM_PWSTCTRL)
40#define SCRATCHPAD_MEM_OFFS 0x310 /* Move this once the correct
41 * place is available */
42#define SCRATCHPAD_BASE_P OMAP343X_CTRL_REGADDR(\
43 OMAP343X_CONTROL_MEM_WKUP +\
44 SCRATCHPAD_MEM_OFFS)
45#define SDRC_POWER_V OMAP34XX_SDRC_REGADDR(SDRC_POWER)
46
47 .text
48/* Function call to get the restore pointer for resume from OFF */
49ENTRY(get_restore_pointer)
50 stmfd sp!, {lr} @ save registers on stack
51 adr r0, restore
52 ldmfd sp!, {pc} @ restore regs and return
53ENTRY(get_restore_pointer_sz)
54 .word . - get_restore_pointer
55/*
56 * Forces OMAP into idle state
57 *
58 * omap34xx_cpu_suspend() - This bit of code just executes the WFI
59 * for normal idles.
60 *
61 * Note: This code gets copied to internal SRAM at boot. When the OMAP
62 * wakes up it continues execution at the point it went to sleep.
63 */
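/*
 * Expected usage (a sketch of how the OMAP3 PM code is intended to call
 * this routine; the caller-side names are illustrative, not part of this
 * file):
 *
 *	_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
 *					 omap34xx_cpu_suspend_sz);
 *	...
 *	_omap_sram_idle(context_save_area_in_sdram, save_state);
 *
 * i.e. r0 = SDRAM address of the context save area and r1 = how much
 * context must be saved (0 means no save, just WFI).
 */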
64ENTRY(omap34xx_cpu_suspend)
65 stmfd sp!, {r0-r12, lr} @ save registers on stack
66loop:
67 /*b loop*/ @Enable to debug by stepping through code
68 /* r0 contains restore pointer in sdram */
69 /* r1 contains information about saving context */
70 ldr r4, sdrc_power @ read the SDRC_POWER register
71 ldr r5, [r4] @ read the contents of SDRC_POWER
72 orr r5, r5, #0x40 @ enable self refresh on idle req
73 str r5, [r4] @ write back to SDRC_POWER register
74
75 cmp r1, #0x0
76 /* If context save is required, do that and execute wfi */
77 bne save_context_wfi
78 /* Data memory barrier and Data sync barrier */
79 mov r1, #0
80 mcr p15, 0, r1, c7, c10, 4
81 mcr p15, 0, r1, c7, c10, 5
82
83 wfi @ wait for interrupt
84
85 nop
86 nop
87 nop
88 nop
89 nop
90 nop
91 nop
92 nop
93 nop
94 nop
95 bl i_dll_wait
96
97 ldmfd sp!, {r0-r12, pc} @ restore regs and return
98restore:
99 /* b restore*/ @ Enable to debug restore code
100 /* Check what the reason for the MPU reset was and store it in r9 */
101 /* 1 - Only L1 and logic lost */
102 /* 2 - Only L2 lost - In this case, we won't be here */
103 /* 3 - Both L1 and L2 lost */
104 ldr r1, pm_pwstctrl_mpu
105 ldr r2, [r1]
106 and r2, r2, #0x3
107 cmp r2, #0x0 @ Check if target power state was OFF or RET
108 moveq r9, #0x3 @ MPU OFF => L1 and L2 lost
109 movne r9, #0x1 @ Only L1 and logic lost => avoid L2 invalidation
110 bne logic_l1_restore
111 /* Execute smi to invalidate L2 cache */
112 mov r12, #0x1 @ set up to invalidate L2
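 /* The .word that follows is the opcode of "smc #0" (formerly SMI),
  * presumably emitted as a raw literal so that assemblers without
  * security-extension support still accept it */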
113smi: .word 0xE1600070 @ Call SMI monitor (smieq)
114logic_l1_restore:
115 mov r1, #0
116 /* Invalidate all instruction caches to PoU
117 * and flush branch target cache */
118 mcr p15, 0, r1, c7, c5, 0
119
120 ldr r4, scratchpad_base
121 ldr r3, [r4,#0xBC]
122 ldmia r3!, {r4-r6}
123 mov sp, r4
124 msr spsr_cxsf, r5
125 mov lr, r6
126
127 ldmia r3!, {r4-r9}
128 /* Coprocessor access Control Register */
129 mcr p15, 0, r4, c1, c0, 2
130
131 /* TTBR0 */
132 MCR p15, 0, r5, c2, c0, 0
133 /* TTBR1 */
134 MCR p15, 0, r6, c2, c0, 1
135 /* Translation table base control register */
136 MCR p15, 0, r7, c2, c0, 2
137 /*domain access Control Register */
138 MCR p15, 0, r8, c3, c0, 0
139 /* data fault status Register */
140 MCR p15, 0, r9, c5, c0, 0
141
142 ldmia r3!,{r4-r8}
143 /* instruction fault status Register */
144 MCR p15, 0, r4, c5, c0, 1
145 /*Data Auxiliary Fault Status Register */
146 MCR p15, 0, r5, c5, c1, 0
147 /*Instruction Auxiliary Fault Status Register*/
148 MCR p15, 0, r6, c5, c1, 1
149 /*Data Fault Address Register */
150 MCR p15, 0, r7, c6, c0, 0
151 /*Instruction Fault Address Register*/
152 MCR p15, 0, r8, c6, c0, 2
153 ldmia r3!,{r4-r7}
154
155 /* user r/w thread and process ID */
156 MCR p15, 0, r4, c13, c0, 2
157 /* user ro thread and process ID */
158 MCR p15, 0, r5, c13, c0, 3
159 /*Privileged only thread and process ID */
160 MCR p15, 0, r6, c13, c0, 4
161 /* cache size selection */
162 MCR p15, 2, r7, c0, c0, 0
163 ldmia r3!,{r4-r8}
164 /* Data TLB lockdown registers */
165 MCR p15, 0, r4, c10, c0, 0
166 /* Instruction TLB lockdown registers */
167 MCR p15, 0, r5, c10, c0, 1
168 /* Secure or Nonsecure Vector Base Address */
169 MCR p15, 0, r6, c12, c0, 0
170 /* FCSE PID */
171 MCR p15, 0, r7, c13, c0, 0
172 /* Context PID */
173 MCR p15, 0, r8, c13, c0, 1
174
175 ldmia r3!,{r4-r5}
176 /* primary memory remap register */
177 MCR p15, 0, r4, c10, c2, 0
178 /*normal memory remap register */
179 MCR p15, 0, r5, c10, c2, 1
180
181 /* Restore cpsr */
182 ldmia r3!,{r4} /*load CPSR from SDRAM*/
183 msr cpsr, r4 /* restore cpsr */
184
185 /* Enabling MMU here */
186 mrc p15, 0, r7, c2, c0, 2 /* Read TTBRControl */
187 /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1*/
188 and r7, #0x7
189 cmp r7, #0x0
190 beq usettbr0
191ttbr_error:
192 /* More work needs to be done to support N[0:2] values other than 0,
193 * so loop here so that the error can be detected
194 */
195 b ttbr_error
196usettbr0:
197 mrc p15, 0, r2, c2, c0, 0
198 ldr r5, ttbrbit_mask
199 and r2, r5
200 mov r4, pc
201 ldr r5, table_index_mask
202 and r4, r5 /* r4 = 31 to 20 bits of pc */
203 /* Extract the value to be written to table entry */
204 ldr r1, table_entry
205 add r1, r1, r4 /* r1 has value to be written to table entry*/
206 /* Getting the address of table entry to modify */
207 lsr r4, #18
208 add r2, r4 /* r2 has the location which needs to be modified */
209 /* Storing previous entry of location being modified */
210 ldr r5, scratchpad_base
211 ldr r4, [r2]
212 str r4, [r5, #0xC0]
213 /* Modify the table entry */
214 str r1, [r2]
215 /* Storing address of entry being modified
216 * - will be restored after enabling MMU */
217 ldr r5, scratchpad_base
218 str r2, [r5, #0xC4]
219
220 mov r0, #0
221 mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
222 mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array
223 mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB
224 mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB
225 /* Restore control register but don't enable caches here */
226 /* Caches will be enabled after restoring MMU table entry */
227 ldmia r3!, {r4}
228 /* Store previous value of control register in scratchpad */
229 str r4, [r5, #0xC8]
230 ldr r2, cache_pred_disable_mask
231 and r4, r2
232 mcr p15, 0, r4, c1, c0, 0
233
234 ldmfd sp!, {r0-r12, pc} @ restore regs and return
235save_context_wfi:
236 /*b save_context_wfi*/ @ enable to debug save code
237 mov r8, r0 /* Store SDRAM address in r8 */
238 /* Check what the target sleep state is: stored in r1 */
239 /* 1 - Only L1 and logic lost */
240 /* 2 - Only L2 lost */
241 /* 3 - Both L1 and L2 lost */
242 cmp r1, #0x2 /* Only L2 lost */
243 beq clean_l2
244 cmp r1, #0x1 /* L2 retained */
245 /* r9 stores whether to clean L2 or not*/
246 moveq r9, #0x0 /* Don't clean L2 */
247 movne r9, #0x1 /* Clean L2 */
248l1_logic_lost:
249 /* Store sp and spsr to SDRAM */
250 mov r4, sp
251 mrs r5, spsr
252 mov r6, lr
253 stmia r8!, {r4-r6}
254 /* Save all ARM registers */
255 /* Coprocessor access control register */
256 mrc p15, 0, r6, c1, c0, 2
257 stmia r8!, {r6}
258 /* TTBR0, TTBR1 and Translation table base control */
259 mrc p15, 0, r4, c2, c0, 0
260 mrc p15, 0, r5, c2, c0, 1
261 mrc p15, 0, r6, c2, c0, 2
262 stmia r8!, {r4-r6}
263 /* Domain access control register, data fault status register,
264 and instruction fault status register */
265 mrc p15, 0, r4, c3, c0, 0
266 mrc p15, 0, r5, c5, c0, 0
267 mrc p15, 0, r6, c5, c0, 1
268 stmia r8!, {r4-r6}
269 /* Data aux fault status register, instruction aux fault status,
270 data fault address register and instruction fault address register */
271 mrc p15, 0, r4, c5, c1, 0
272 mrc p15, 0, r5, c5, c1, 1
273 mrc p15, 0, r6, c6, c0, 0
274 mrc p15, 0, r7, c6, c0, 2
275 stmia r8!, {r4-r7}
276 /* user r/w thread and process ID, user r/o thread and process ID,
277 priv only thread and process ID, cache size selection */
278 mrc p15, 0, r4, c13, c0, 2
279 mrc p15, 0, r5, c13, c0, 3
280 mrc p15, 0, r6, c13, c0, 4
281 mrc p15, 2, r7, c0, c0, 0
282 stmia r8!, {r4-r7}
283 /* Data TLB lockdown, instruction TLB lockdown registers */
284 mrc p15, 0, r5, c10, c0, 0
285 mrc p15, 0, r6, c10, c0, 1
286 stmia r8!, {r5-r6}
287 /* Secure or non secure vector base address, FCSE PID, Context PID*/
288 mrc p15, 0, r4, c12, c0, 0
289 mrc p15, 0, r5, c13, c0, 0
290 mrc p15, 0, r6, c13, c0, 1
291 stmia r8!, {r4-r6}
292 /* Primary remap, normal remap registers */
293 mrc p15, 0, r4, c10, c2, 0
294 mrc p15, 0, r5, c10, c2, 1
295 stmia r8!,{r4-r5}
296
297 /* Store current cpsr*/
298 mrs r2, cpsr
299 stmia r8!, {r2}
300
301 mrc p15, 0, r4, c1, c0, 0
302 /* save control register */
303 stmia r8!, {r4}
304clean_caches:
305 /* Clean Data or unified cache to POU*/
306 /* How to invalidate only L1 cache???? - #FIX_ME# */
307 /* mcr p15, 0, r11, c7, c11, 1 */
308 cmp r9, #1 /* Check whether L2 inval is required or not*/
309 bne skip_l2_inval
310clean_l2:
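 /* The loop below mirrors the ARMv7 set/way walk used by
  * v7_flush_dcache_all (arch/arm/mm/cache-v7.S), except that each line
  * is only cleaned (DCCSW) rather than cleaned and invalidated */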
311 /* read clidr */
312 mrc p15, 1, r0, c0, c0, 1
313 /* extract loc from clidr */
314 ands r3, r0, #0x7000000
315 /* left align loc bit field */
316 mov r3, r3, lsr #23
317 /* if loc is 0, then no need to clean */
318 beq finished
319 /* start clean at cache level 0 */
320 mov r10, #0
321loop1:
322 /* work out 3x current cache level */
323 add r2, r10, r10, lsr #1
324 /* extract cache type bits from clidr*/
325 mov r1, r0, lsr r2
326 /* mask off the bits for current cache only */
327 and r1, r1, #7
328 /* see what cache we have at this level */
329 cmp r1, #2
330 /* skip if no cache, or just i-cache */
331 blt skip
332 /* select current cache level in cssr */
333 mcr p15, 2, r10, c0, c0, 0
334 /* isb to sync the new cssr & csidr */
335 isb
336 /* read the new csidr */
337 mrc p15, 1, r1, c0, c0, 0
338 /* extract the length of the cache lines */
339 and r2, r1, #7
340 /* add 4 (line length offset) */
341 add r2, r2, #4
342 ldr r4, assoc_mask
343 /* find maximum number of the way size */
344 ands r4, r4, r1, lsr #3
345 /* find bit position of way size increment */
346 clz r5, r4
347 ldr r7, numset_mask
348 /* extract max number of the index size*/
349 ands r7, r7, r1, lsr #13
350loop2:
351 mov r9, r4
352 /* create working copy of max way size*/
353loop3:
354 /* factor way and cache number into r11 */
355 orr r11, r10, r9, lsl r5
356 /* factor index number into r11 */
357 orr r11, r11, r7, lsl r2
358 /* clean by set/way */
359 mcr p15, 0, r11, c7, c10, 2
360 /* decrement the way*/
361 subs r9, r9, #1
362 bge loop3
363 /*decrement the index */
364 subs r7, r7, #1
365 bge loop2
366skip:
367 add r10, r10, #2
368 /* increment cache number */
369 cmp r3, r10
370 bgt loop1
371finished:
372 /* switch back to cache level 0 */
373 mov r10, #0
374 /* select current cache level in cssr */
375 mcr p15, 2, r10, c0, c0, 0
376 isb
377skip_l2_inval:
378 /* Data memory barrier and Data sync barrier */
379 mov r1, #0
380 mcr p15, 0, r1, c7, c10, 4
381 mcr p15, 0, r1, c7, c10, 5
382
383 wfi @ wait for interrupt
384 nop
385 nop
386 nop
387 nop
388 nop
389 nop
390 nop
391 nop
392 nop
393 nop
394 bl i_dll_wait
395 /* restore regs and return */
396 ldmfd sp!, {r0-r12, pc}
397
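/*
 * Busy-wait for clk_stabilize_delay iterations (presumably to give the
 * SDRC DLL time to stabilize after wakeup), then clear the
 * self-refresh-on-idle-request bit that was set in SDRC_POWER before WFI.
 */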
398i_dll_wait:
399 ldr r4, clk_stabilize_delay
400
401i_dll_delay:
402 subs r4, r4, #0x1
403 bne i_dll_delay
404 ldr r4, sdrc_power
405 ldr r5, [r4]
406 bic r5, r5, #0x40
407 str r5, [r4]
408 bx lr
409pm_prepwstst_core:
410 .word PM_PREPWSTST_CORE_V
411pm_prepwstst_mpu:
412 .word PM_PREPWSTST_MPU_V
413pm_pwstctrl_mpu:
414 .word PM_PWSTCTRL_MPU_P
415scratchpad_base:
416 .word SCRATCHPAD_BASE_P
417sdrc_power:
418 .word SDRC_POWER_V
419context_mem:
420 .word 0x803E3E14
421clk_stabilize_delay:
422 .word 0x000001FF
423assoc_mask:
424 .word 0x3ff
425numset_mask:
426 .word 0x7fff
427ttbrbit_mask:
428 .word 0xFFFFC000
429table_index_mask:
430 .word 0xFFF00000
431table_entry:
432 .word 0x00000C02
433cache_pred_disable_mask:
434 .word 0xFFFFE7FB
435ENTRY(omap34xx_cpu_suspend_sz)
436 .word . - omap34xx_cpu_suspend