author	Russell King <rmk+kernel@arm.linux.org.uk>	2011-09-22 17:39:23 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-09-22 17:39:23 -0400
commit	b0a37dca72a05b7b579f288d8a67afeed96bffa5 (patch)
tree	a3057a3debd078f569e90e709d7b320006d8cb32 /arch/arm/kernel
parent	f70cac8d9c7125f83048f8b3d1c60f5a041a165c (diff)
parent	8e6f83bbdf770014c070c5a41c8e89617cb2a66b (diff)
Merge branch 'pm' into devel-stable
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/Makefile	2
-rw-r--r--	arch/arm/kernel/sleep.S	85
-rw-r--r--	arch/arm/kernel/suspend.c	72
3 files changed, 100 insertions, 59 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index af32e466e1c..8fa83f54c96 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
 obj-$(CONFIG_ARTHUR) += arthur.o
 obj-$(CONFIG_ISA_DMA) += dma-isa.o
 obj-$(CONFIG_PCI) += bios32.o isa.o
-obj-$(CONFIG_PM_SLEEP) += sleep.o
+obj-$(CONFIG_PM_SLEEP) += sleep.o suspend.o
 obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
 obj-$(CONFIG_SMP) += smp.o smp_tlb.o
 obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index dc902f2c684..020e99c845e 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -8,92 +8,61 @@
 	.text
 
 /*
- * Save CPU state for a suspend
- *  r1 = v:p offset
- *  r2 = suspend function arg0
- *  r3 = suspend function
+ * Save CPU state for a suspend.  This saves the CPU general purpose
+ * registers, and allocates space on the kernel stack to save the CPU
+ * specific registers and some other data for resume.
+ *  r0 = suspend function arg0
+ *  r1 = suspend function
  */
 ENTRY(__cpu_suspend)
 	stmfd	sp!, {r4 - r11, lr}
 #ifdef MULTI_CPU
 	ldr	r10, =processor
-	ldr	r5, [r10, #CPU_SLEEP_SIZE]	@ size of CPU sleep state
-	ldr	ip, [r10, #CPU_DO_RESUME]	@ virtual resume function
+	ldr	r4, [r10, #CPU_SLEEP_SIZE]	@ size of CPU sleep state
 #else
-	ldr	r5, =cpu_suspend_size
-	ldr	ip, =cpu_do_resume
+	ldr	r4, =cpu_suspend_size
 #endif
-	mov	r6, sp			@ current virtual SP
-	sub	sp, sp, r5		@ allocate CPU state on stack
-	mov	r0, sp			@ save pointer to CPU save block
-	add	ip, ip, r1		@ convert resume fn to phys
-	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
-	ldr	r5, =sleep_save_sp
-	add	r6, sp, r1		@ convert SP to phys
-	stmfd	sp!, {r2, r3}		@ save suspend func arg and pointer
+	mov	r5, sp			@ current virtual SP
+	add	r4, r4, #12		@ Space for pgd, virt sp, phys resume fn
+	sub	sp, sp, r4		@ allocate CPU state on stack
+	stmfd	sp!, {r0, r1}		@ save suspend func arg and pointer
+	add	r0, sp, #8		@ save pointer to save block
+	mov	r1, r4			@ size of save block
+	mov	r2, r5			@ virtual SP
+	ldr	r3, =sleep_save_sp
 #ifdef CONFIG_SMP
 	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
 	ALT_UP(mov lr, #0)
 	and	lr, lr, #15
-	str	r6, [r5, lr, lsl #2]	@ save phys SP
-#else
-	str	r6, [r5]		@ save phys SP
-#endif
-#ifdef MULTI_CPU
-	mov	lr, pc
-	ldr	pc, [r10, #CPU_DO_SUSPEND]	@ save CPU state
-#else
-	bl	cpu_do_suspend
-#endif
-
-	@ flush data cache
-#ifdef MULTI_CACHE
-	ldr	r10, =cpu_cache
-	mov	lr, pc
-	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
-#else
-	bl	__cpuc_flush_kern_all
+	add	r3, r3, lr, lsl #2
 #endif
+	bl	__cpu_suspend_save
 	adr	lr, BSYM(cpu_suspend_abort)
 	ldmfd	sp!, {r0, pc}		@ call suspend fn
 ENDPROC(__cpu_suspend)
 	.ltorg
 
 cpu_suspend_abort:
-	ldmia	sp!, {r1 - r3}		@ pop v:p, virt SP, phys resume fn
+	ldmia	sp!, {r1 - r3}		@ pop phys pgd, virt SP, phys resume fn
+	teq	r0, #0
+	moveq	r0, #1			@ force non-zero value
 	mov	sp, r2
 	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_suspend_abort)
 
 /*
  * r0 = control register value
- * r1 = v:p offset (preserved by cpu_do_resume)
- * r2 = phys page table base
- * r3 = L1 section flags
  */
+	.align	5
 ENTRY(cpu_resume_mmu)
-	adr	r4, cpu_resume_turn_mmu_on
-	mov	r4, r4, lsr #20
-	orr	r3, r3, r4, lsl #20
-	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
-	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
-	sub	r2, r2, r1
 	ldr	r3, =cpu_resume_after_mmu
-	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
-	b	cpu_resume_turn_mmu_on
-ENDPROC(cpu_resume_mmu)
-	.ltorg
-	.align	5
-cpu_resume_turn_mmu_on:
-	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
-	mrc	p15, 0, r1, c0, c0, 0	@ read id reg
-	mov	r1, r1
-	mov	r1, r1
+	mcr	p15, 0, r0, c1, c0, 0	@ turn on MMU, I-cache, etc
+	mrc	p15, 0, r0, c0, c0, 0	@ read id reg
+	mov	r0, r0
+	mov	r0, r0
 	mov	pc, r3			@ jump to virtual address
-ENDPROC(cpu_resume_turn_mmu_on)
+ENDPROC(cpu_resume_mmu)
 cpu_resume_after_mmu:
-	str	r5, [r2, r4, lsl #2]	@ restore old mapping
-	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
 	bl	cpu_init		@ restore the und/abt/irq banked regs
 	mov	r0, #0			@ return zero on success
 	ldmfd	sp!, {r4 - r11, pc}
@@ -119,7 +88,7 @@ ENTRY(cpu_resume)
 	ldr	r0, sleep_save_sp	@ stack phys addr
 #endif
 	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1	@ set SVC, irqs off
-	@ load v:p, stack, resume fn
+	@ load phys pgd, stack, resume fn
 ARM(	ldmia	r0!, {r1, sp, pc}	)
 THUMB(	ldmia	r0!, {r1, r2, r3}	)
 THUMB(	mov	sp, r2			)
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
new file mode 100644
index 00000000000..93a22d282c1
--- /dev/null
+++ b/arch/arm/kernel/suspend.c
@@ -0,0 +1,72 @@
+#include <linux/init.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/memory.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+
+static pgd_t *suspend_pgd;
+
+extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
+extern void cpu_resume_mmu(void);
+
+/*
+ * This is called by __cpu_suspend() to save the state, and do whatever
+ * flushing is required to ensure that when the CPU goes to sleep we have
+ * the necessary data available when the caches are not searched.
+ */
+void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
+{
+	*save_ptr = virt_to_phys(ptr);
+
+	/* This must correspond to the LDM in cpu_resume() assembly */
+	*ptr++ = virt_to_phys(suspend_pgd);
+	*ptr++ = sp;
+	*ptr++ = virt_to_phys(cpu_do_resume);
+
+	cpu_do_suspend(ptr);
+
+	flush_cache_all();
+	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
+	outer_clean_range(virt_to_phys(save_ptr),
+			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
+}
+
+/*
+ * Hide the first two arguments to __cpu_suspend - these are an implementation
+ * detail which platform code shouldn't have to know about.
+ */
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+	struct mm_struct *mm = current->active_mm;
+	int ret;
+
+	if (!suspend_pgd)
+		return -EINVAL;
+
+	/*
+	 * Provide a temporary page table with an identity mapping for
+	 * the MMU-enable code, required for resuming.  On successful
+	 * resume (indicated by a zero return code), we need to switch
+	 * back to the correct page tables.
+	 */
+	ret = __cpu_suspend(arg, fn);
+	if (ret == 0) {
+		cpu_switch_mm(mm->pgd, mm);
+		local_flush_tlb_all();
+	}
+
+	return ret;
+}
+
+static int __init cpu_suspend_init(void)
+{
+	suspend_pgd = pgd_alloc(&init_mm);
+	if (suspend_pgd) {
+		unsigned long addr = virt_to_phys(cpu_resume_mmu);
+		identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE);
+	}
+	return suspend_pgd ? 0 : -ENOMEM;
+}
+core_initcall(cpu_suspend_init);
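
Note: the save area written by __cpu_suspend_save() is laid out word-by-word in the patch, not as a named structure. Purely as an illustration of the three-word header that the "ldmia r0!, {r1, sp, pc}" in cpu_resume loads, a sketch of the layout is shown below; the struct name is invented and not part of this merge.

#include <linux/types.h>

/*
 * Illustrative only: the kernel builds this header with three
 * "*ptr++ = ..." stores in __cpu_suspend_save(), and sleep_save_sp
 * (one slot per CPU on SMP) holds the physical address of the block.
 */
struct suspend_save_block {
	u32 phys_pgd;		/* virt_to_phys(suspend_pgd), page table for the MMU-enable code */
	u32 virt_sp;		/* virtual stack pointer to restore once the MMU is back on */
	u32 phys_resume;	/* virt_to_phys(cpu_do_resume), entered with the MMU still off */
	u32 cpu_state[];	/* CPU-specific registers saved by cpu_do_suspend() follow */
};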
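
For context, a minimal sketch of how platform code might call the new interface: cpu_suspend() and its return convention come from the patch above, while mach_do_idle()/mach_pm_enter() are hypothetical names used only for illustration, assuming the cpu_suspend() prototype is visible via <asm/suspend.h>.

#include <asm/suspend.h>

/*
 * Hypothetical suspend finisher: called by __cpu_suspend after
 * __cpu_suspend_save() has saved and flushed the CPU state.
 * Returning from this function means the power-down did not happen.
 */
static int mach_do_idle(unsigned long arg)
{
	/* enter the SoC low-power state here */
	return 1;	/* reached only if the suspend was aborted */
}

static int mach_pm_enter(void)
{
	/*
	 * The first argument is passed through to mach_do_idle().
	 * Returns 0 after a successful suspend/resume cycle; non-zero
	 * (via the cpu_suspend_abort path) if mach_do_idle() returned.
	 */
	return cpu_suspend(0, mach_do_idle);
}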