Diffstat (limited to 'arch/arm/kernel/sleep.S')
-rw-r--r--   arch/arm/kernel/sleep.S   84
1 file changed, 37 insertions(+), 47 deletions(-)
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 6398ead9d1c..dc902f2c684 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -10,64 +10,61 @@
 /*
  * Save CPU state for a suspend
  * r1 = v:p offset
- * r3 = virtual return function
- * Note: sp is decremented to allocate space for CPU state on stack
- * r0-r3,r9,r10,lr corrupted
+ * r2 = suspend function arg0
+ * r3 = suspend function
  */
-ENTRY(cpu_suspend)
-	mov	r9, lr
+ENTRY(__cpu_suspend)
+	stmfd	sp!, {r4 - r11, lr}
 #ifdef MULTI_CPU
 	ldr	r10, =processor
-	mov	r2, sp			@ current virtual SP
-	ldr	r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
+	ldr	r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
 	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
-	sub	sp, sp, r0		@ allocate CPU state on stack
-	mov	r0, sp			@ save pointer
+#else
+	ldr	r5, =cpu_suspend_size
+	ldr	ip, =cpu_do_resume
+#endif
+	mov	r6, sp			@ current virtual SP
+	sub	sp, sp, r5		@ allocate CPU state on stack
+	mov	r0, sp			@ save pointer to CPU save block
 	add	ip, ip, r1		@ convert resume fn to phys
-	stmfd	sp!, {r1, r2, r3, ip}	@ save v:p, virt SP, retfn, phys resume fn
-	ldr	r3, =sleep_save_sp
-	add	r2, sp, r1		@ convert SP to phys
+	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
+	ldr	r5, =sleep_save_sp
+	add	r6, sp, r1		@ convert SP to phys
+	stmfd	sp!, {r2, r3}		@ save suspend func arg and pointer
 #ifdef CONFIG_SMP
 	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
 	ALT_UP(mov lr, #0)
 	and	lr, lr, #15
-	str	r2, [r3, lr, lsl #2]	@ save phys SP
+	str	r6, [r5, lr, lsl #2]	@ save phys SP
 #else
-	str	r2, [r3]		@ save phys SP
+	str	r6, [r5]		@ save phys SP
 #endif
+#ifdef MULTI_CPU
 	mov	lr, pc
 	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
 #else
-	mov	r2, sp			@ current virtual SP
-	ldr	r0, =cpu_suspend_size
-	sub	sp, sp, r0		@ allocate CPU state on stack
-	mov	r0, sp			@ save pointer
-	stmfd	sp!, {r1, r2, r3}	@ save v:p, virt SP, return fn
-	ldr	r3, =sleep_save_sp
-	add	r2, sp, r1		@ convert SP to phys
-#ifdef CONFIG_SMP
-	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
-	ALT_UP(mov lr, #0)
-	and	lr, lr, #15
-	str	r2, [r3, lr, lsl #2]	@ save phys SP
-#else
-	str	r2, [r3]		@ save phys SP
-#endif
 	bl	cpu_do_suspend
 #endif
 
 	@ flush data cache
 #ifdef MULTI_CACHE
 	ldr	r10, =cpu_cache
-	mov	lr, r9
+	mov	lr, pc
 	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
 #else
-	mov	lr, r9
-	b	__cpuc_flush_kern_all
+	bl	__cpuc_flush_kern_all
 #endif
-ENDPROC(cpu_suspend)
+	adr	lr, BSYM(cpu_suspend_abort)
+	ldmfd	sp!, {r0, pc}		@ call suspend fn
+ENDPROC(__cpu_suspend)
 	.ltorg
 
+cpu_suspend_abort:
+	ldmia	sp!, {r1 - r3}		@ pop v:p, virt SP, phys resume fn
+	mov	sp, r2
+	ldmfd	sp!, {r4 - r11, pc}
+ENDPROC(cpu_suspend_abort)
+
 /*
  * r0 = control register value
  * r1 = v:p offset (preserved by cpu_do_resume)
@@ -97,7 +94,9 @@ ENDPROC(cpu_resume_turn_mmu_on)
 cpu_resume_after_mmu:
 	str	r5, [r2, r4, lsl #2]	@ restore old mapping
 	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
-	mov	pc, lr
+	bl	cpu_init		@ restore the und/abt/irq banked regs
+	mov	r0, #0			@ return zero on success
+	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_resume_after_mmu)
 
 /*
@@ -120,20 +119,11 @@ ENTRY(cpu_resume)
 	ldr	r0, sleep_save_sp	@ stack phys addr
 #endif
 	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
-#ifdef MULTI_CPU
-	@ load v:p, stack, return fn, resume fn
- ARM(	ldmia	r0!, {r1, sp, lr, pc}	)
-THUMB(	ldmia	r0!, {r1, r2, r3, r4}	)
+	@ load v:p, stack, resume fn
+ ARM(	ldmia	r0!, {r1, sp, pc}	)
+THUMB(	ldmia	r0!, {r1, r2, r3}	)
 THUMB(	mov	sp, r2	)
-THUMB(	mov	lr, r3	)
-THUMB(	bx	r4	)
-#else
-	@ load v:p, stack, return fn
- ARM(	ldmia	r0!, {r1, sp, lr}	)
-THUMB(	ldmia	r0!, {r1, r2, lr}	)
-THUMB(	mov	sp, r2	)
-	b	cpu_do_resume
-#endif
+THUMB(	bx	r3	)
 ENDPROC(cpu_resume)
 
 sleep_save_sp:
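
The comment block at the top of __cpu_suspend documents the new register interface: r1 carries the v:p offset, r2 the suspend function's argument and r3 the suspend function itself, with cpu_resume_after_mmu returning zero on success and cpu_suspend_abort unwinding the stack if the suspend function simply returns. The C fragment below is a minimal, hypothetical sketch of how a caller might map onto that interface under the AAPCS (four arguments in r0-r3, r0 unused by the entry code); the prototype and the wrapper name platform_do_suspend are assumptions for illustration, not part of this patch.

	/*
	 * Hypothetical sketch, not taken from this diff: the asm entry
	 * point ignores its first register argument, so a dummy 0 is
	 * passed in r0 and the documented values land in r1-r3.
	 */
	extern int __cpu_suspend(int unused, long vp_offset,
				 unsigned long arg, int (*fn)(unsigned long));

	static int platform_do_suspend(unsigned long arg,
				       int (*finisher)(unsigned long))
	{
		/*
		 * Returns 0 when control comes back through cpu_resume
		 * (cpu_resume_after_mmu sets r0 to 0), or whatever the
		 * finisher left in r0 if it returns and
		 * cpu_suspend_abort pops the saved state instead.
		 */
		return __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET,
				     arg, finisher);
	}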