author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-08-26 15:28:52 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-09-20 18:33:36 -0400
commit		e8ce0eb5e2254b85415e4b58e73f24a5d13846a1 (patch)
tree		26aaee04d5a4bb872eea215f65073825258ecd76 /arch/arm/kernel/sleep.S
parent		f5fa68d9674156ddaafa12a058ccc93c8866d5f9 (diff)
ARM: pm: preallocate a page table for suspend/resume
Preallocate a page table and setup an identity mapping for the MMU
enable code. This means we don't have to "borrow" a page table to
do this, avoiding complexities with L2 cache coherency.
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Tested-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
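
The preallocated page table itself is set up on the C side of this patch, which the diffstat below filters out. As a rough sketch of the idea only (the initcall, the suspend_pgd name and the pgd_alloc()/identity_mapping_add() calls are assumptions based on the arch/arm identity-mapping helpers of this era, not a quote of the patch):

	/*
	 * Illustrative sketch, not the patch itself: allocate a private
	 * page table once at boot and install a 1:1 section mapping over
	 * the physical address of the MMU enable trampoline, so resume
	 * never has to borrow and patch a live page table.
	 */
	#include <linux/init.h>
	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <asm/pgalloc.h>
	#include <asm/pgtable.h>
	#include <asm/memory.h>

	/* helper from arch/arm/mm/idmap.c (assumed signature) */
	extern void identity_mapping_add(pgd_t *pgd, unsigned long addr,
					 unsigned long end);
	/* made a global ENTRY() in sleep.S by the hunk below */
	extern void cpu_resume_turn_mmu_on(void);

	static pgd_t *suspend_pgd;		/* preallocated identity table */

	static int __init cpu_suspend_init(void)
	{
		unsigned long addr, end;

		suspend_pgd = pgd_alloc(&init_mm);
		if (!suspend_pgd)
			return -ENOMEM;

		addr = virt_to_phys(cpu_resume_turn_mmu_on);
		end  = addr + (1 << 20);	/* one 1MB L1 section worth */
		/* 1:1 map the section(s) covering the MMU enable code */
		identity_mapping_add(suspend_pgd, addr, end);
		return 0;
	}
	core_initcall(cpu_suspend_init);

With a dedicated table carrying the 1:1 mapping, cpu_resume_mmu no longer has to save, patch and restore an entry in a borrowed page table, which is exactly what the lines deleted in the third hunk below were doing.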
Diffstat (limited to 'arch/arm/kernel/sleep.S')
-rw-r--r--	arch/arm/kernel/sleep.S | 33
1 file changed, 12 insertions(+), 21 deletions(-)
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 46a9f460db83..8cf13de1e368 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -27,7 +27,7 @@ ENTRY(__cpu_suspend)
 	sub	sp, sp, r5		@ allocate CPU state on stack
 	mov	r0, sp			@ save pointer to CPU save block
 	add	ip, ip, r1		@ convert resume fn to phys
-	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
+	stmfd	sp!, {r6, ip}		@ save virt SP, phys resume fn
 	ldr	r5, =sleep_save_sp
 	add	r6, sp, r1		@ convert SP to phys
 	stmfd	sp!, {r2, r3}		@ save suspend func arg and pointer
@@ -60,7 +60,7 @@ ENDPROC(__cpu_suspend)
 	.ltorg

 cpu_suspend_abort:
-	ldmia	sp!, {r1 - r3}		@ pop v:p, virt SP, phys resume fn
+	ldmia	sp!, {r2 - r3}		@ pop virt SP, phys resume fn
 	teq	r0, #0
 	moveq	r0, #1			@ force non-zero value
 	mov	sp, r2
@@ -74,28 +74,19 @@ ENDPROC(cpu_suspend_abort)
  * r3 = L1 section flags
  */
 ENTRY(cpu_resume_mmu)
-	adr	r4, cpu_resume_turn_mmu_on
-	mov	r4, r4, lsr #20
-	orr	r3, r3, r4, lsl #20
-	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
-	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
-	sub	r2, r2, r1
 	ldr	r3, =cpu_resume_after_mmu
-	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
 	b	cpu_resume_turn_mmu_on
 ENDPROC(cpu_resume_mmu)
 	.ltorg
 	.align	5
-cpu_resume_turn_mmu_on:
-	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
-	mrc	p15, 0, r1, c0, c0, 0	@ read id reg
-	mov	r1, r1
-	mov	r1, r1
+ENTRY(cpu_resume_turn_mmu_on)
+	mcr	p15, 0, r0, c1, c0, 0	@ turn on MMU, I-cache, etc
+	mrc	p15, 0, r0, c0, c0, 0	@ read id reg
+	mov	r0, r0
+	mov	r0, r0
 	mov	pc, r3			@ jump to virtual address
 ENDPROC(cpu_resume_turn_mmu_on)
 cpu_resume_after_mmu:
-	str	r5, [r2, r4, lsl #2]	@ restore old mapping
-	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
 	bl	cpu_init		@ restore the und/abt/irq banked regs
 	mov	r0, #0			@ return zero on success
 	ldmfd	sp!, {r4 - r11, pc}
@@ -121,11 +112,11 @@ ENTRY(cpu_resume)
 	ldr	r0, sleep_save_sp	@ stack phys addr
 #endif
 	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
-						@ load v:p, stack, resume fn
- ARM(	ldmia	r0!, {r1, sp, pc}	)
-THUMB(	ldmia	r0!, {r1, r2, r3}	)
+						@ load stack, resume fn
+ ARM(	ldmia	r0!, {sp, pc}	)
+THUMB(	ldmia	r0!, {r2, r3}	)
 THUMB(	mov	sp, r2			)
 THUMB(	bx	r3			)
 ENDPROC(cpu_resume)

 sleep_save_sp: