author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-07-02 04:54:01 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-07-02 04:54:01 -0400
commit		29cb3cd208dd0e4471bb80bec4facc49ceb199fa (patch)
tree		035128bf7af997d5e1e5208c900ba78c5a1df46d /arch/arm/kernel/sleep.S
parent		cbe263497def23befb6f475977661bae5d1f82e4 (diff)
ARM: pm: allow suspend finisher to return error codes
There are SoCs where an attempt to enter a low power state is ignored,
and the CPU continues executing instructions with all state preserved.
At that point it is over-complex to disable the MMU just to call the
resume path.
Instead, allow the suspend finisher to return an error code to abort
the suspend in this circumstance, in which case the cpu_suspend
internals unwind the saved state from the stack. Also omit the TLB
flush on this path, as no changes to the page tables will have
happened.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
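Although the diff below is all assembler, the visible contract change is for platform code: a suspend finisher may now return a negative errno to abort. A minimal sketch of such a finisher follows; the mysoc_* names are hypothetical, and the cpu_suspend() prototype shown is the int-returning form this patch series converges on, not necessarily the one at this exact commit:

/*
 * Sketch only: mysoc_enter_lowpower() is a hypothetical SoC helper,
 * and the cpu_suspend() prototype is assumed from later in the series.
 */
#include <linux/errno.h>

extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
extern void mysoc_enter_lowpower(unsigned long arg);	/* hypothetical */

static int mysoc_suspend_finisher(unsigned long arg)
{
	mysoc_enter_lowpower(arg);	/* normally does not return */

	/*
	 * Still executing: the low power request was ignored, with all
	 * state preserved.  Return an error so the cpu_suspend
	 * internals unwind the saved state instead of resuming.
	 */
	return -EBUSY;
}

static int mysoc_do_suspend(void)
{
	/* 0 after a genuine suspend/resume, else the finisher's code */
	return cpu_suspend(0, mysoc_suspend_finisher);
}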
Diffstat (limited to 'arch/arm/kernel/sleep.S')
-rw-r--r--	arch/arm/kernel/sleep.S	11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index c156d0e5f455..dc902f2c6845 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -12,7 +12,6 @@
  * r1 = v:p offset
  * r2 = suspend function arg0
  * r3 = suspend function
- * Note: does not return until system resumes
  */
 ENTRY(__cpu_suspend)
 	stmfd	sp!, {r4 - r11, lr}
@@ -26,7 +25,7 @@ ENTRY(__cpu_suspend)
 #endif
 	mov	r6, sp			@ current virtual SP
 	sub	sp, sp, r5		@ allocate CPU state on stack
-	mov	r0, sp			@ save pointer
+	mov	r0, sp			@ save pointer to CPU save block
 	add	ip, ip, r1		@ convert resume fn to phys
 	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
 	ldr	r5, =sleep_save_sp
@@ -55,10 +54,17 @@ ENTRY(__cpu_suspend)
 #else
 	bl	__cpuc_flush_kern_all
 #endif
+	adr	lr, BSYM(cpu_suspend_abort)
 	ldmfd	sp!, {r0, pc}		@ call suspend fn
 ENDPROC(__cpu_suspend)
 	.ltorg
 
+cpu_suspend_abort:
+	ldmia	sp!, {r1 - r3}		@ pop v:p, virt SP, phys resume fn
+	mov	sp, r2
+	ldmfd	sp!, {r4 - r11, pc}
+ENDPROC(cpu_suspend_abort)
+
 /*
  * r0 = control register value
  * r1 = v:p offset (preserved by cpu_do_resume)
@@ -89,6 +95,7 @@ cpu_resume_after_mmu:
 	str	r5, [r2, r4, lsl #2]	@ restore old mapping
 	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
 	bl	cpu_init		@ restore the und/abt/irq banked regs
+	mov	r0, #0			@ return zero on success
 	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_resume_after_mmu)
 
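To read the new control flow: the added "adr lr, BSYM(cpu_suspend_abort)" seeds the link register just before "ldmfd sp!, {r0, pc}" jumps into the finisher, so a finisher that returns rather than powering down falls into cpu_suspend_abort. That path pops the three words saved on entry (v:p offset, virtual SP, physical resume fn), restores the pre-suspend stack pointer from r2, and returns to the caller with the finisher's error code still in r0. A genuine resume instead comes back through cpu_resume_after_mmu, where the added "mov r0, #0" makes the whole operation report success.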