Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/mach-pxa/mfp.c      9
-rw-r--r--  arch/arm/mach-pxa/pxa3xx.c  73
-rw-r--r--  arch/arm/mach-pxa/sleep.S  102
3 files changed, 170 insertions, 14 deletions
diff --git a/arch/arm/mach-pxa/mfp.c b/arch/arm/mach-pxa/mfp.c
index 6ce35041c7ed..f5809adce298 100644
--- a/arch/arm/mach-pxa/mfp.c
+++ b/arch/arm/mach-pxa/mfp.c
@@ -22,6 +22,7 @@
 #include <asm/hardware.h>
 #include <asm/arch/mfp.h>
 #include <asm/arch/mfp-pxa3xx.h>
+#include <asm/arch/pxa3xx-regs.h>
 
 /* mfp_spin_lock is used to ensure that MFP register configuration
  * (most likely a read-modify-write operation) is atomic, and that
@@ -223,6 +224,14 @@ static int pxa3xx_mfp_resume(struct sys_device *d)
 		struct pxa3xx_mfp_pin *p = &mfp_table[pin];
 		__mfp_config_run(p);
 	}
+
+	/* clear RDH bit when MFP settings are restored
+	 *
+	 * NOTE: the last 3 bits DxS are write-1-to-clear so carefully
+	 * preserve them here in case they will be referenced later
+	 */
+	ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);
+
 	return 0;
 }
 
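The D1S/D2S/D3S bits at the bottom of ASCR are write-1-to-clear status flags, so the hunk above masks them out of the value written back; echoing a 1 would wipe status that later code may still want to read. A minimal sketch of that read-modify-write pattern, with hypothetical ascr_read()/ascr_write() helpers standing in for the kernel's register accessor and bit positions assumed from the PXA3xx register definitions:

#include <stdint.h>

/* bit positions assumed from the PXA3xx ASCR layout */
#define ASCR_RDH	(1u << 31)
#define ASCR_D1S	(1u << 2)	/* DxS: write-1-to-clear status bits */
#define ASCR_D2S	(1u << 1)
#define ASCR_D3S	(1u << 0)

/* hypothetical MMIO helpers standing in for the kernel's ASCR accessor */
extern uint32_t ascr_read(void);
extern void ascr_write(uint32_t val);

static void clear_rdh_preserving_status(void)
{
	uint32_t val = ascr_read();

	/* Write back 0 in the W1C positions: writing their current value
	 * (possibly 1) would clear status that other code may still need. */
	val &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);
	ascr_write(val);
}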
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index d0b2afd4368a..e47e67c11afe 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -40,6 +40,7 @@
 #define RO_CLK		60000000
 
 #define ACCR_D0CS	(1 << 26)
+#define ACCR_PCCE	(1 << 11)
 
 /* crystal frequency to static memory controller multiplier (SMCFS) */
 static unsigned char smcfs_mult[8] = { 6, 0, 8, 0, 0, 16, };
@@ -204,7 +205,6 @@ static struct clk pxa3xx_clks[] = {
 };
 
 #ifdef CONFIG_PM
-#define SLEEP_SAVE_SIZE	4
 
 #define ISRAM_START	0x5c000000
 #define ISRAM_SIZE	SZ_256K
@@ -212,25 +212,29 @@ static struct clk pxa3xx_clks[] = {
 static void __iomem *sram;
 static unsigned long wakeup_src;
 
-static void pxa3xx_cpu_pm_save(unsigned long *sleep_save)
-{
-	pr_debug("PM: CKENA=%08x CKENB=%08x\n", CKENA, CKENB);
-
-	if (CKENA & (1 << CKEN_USBH)) {
-		printk(KERN_ERR "PM: USB host clock not stopped?\n");
-		CKENA &= ~(1 << CKEN_USBH);
-	}
-//	CKENA |= 1 << (CKEN_ISC & 31);
-
-	/*
-	 * Low power modes require the HSIO2 clock to be enabled.
-	 */
-	CKENB |= 1 << (CKEN_HSIO2 & 31);
+#define SAVE(x)		sleep_save[SLEEP_SAVE_##x] = x
+#define RESTORE(x)	x = sleep_save[SLEEP_SAVE_##x]
+
+enum {	SLEEP_SAVE_START = 0,
+	SLEEP_SAVE_CKENA,
+	SLEEP_SAVE_CKENB,
+	SLEEP_SAVE_ACCR,
+
+	SLEEP_SAVE_SIZE,
+};
+
+static void pxa3xx_cpu_pm_save(unsigned long *sleep_save)
+{
+	SAVE(CKENA);
+	SAVE(CKENB);
+	SAVE(ACCR);
 }
 
 static void pxa3xx_cpu_pm_restore(unsigned long *sleep_save)
 {
-	CKENB &= ~(1 << (CKEN_HSIO2 & 31));
+	RESTORE(ACCR);
+	RESTORE(CKENA);
+	RESTORE(CKENB);
 }
 
 /*
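The new SAVE()/RESTORE() macros index sleep_save[] by token-pasting the register name onto SLEEP_SAVE_, and the enum's trailing SLEEP_SAVE_SIZE entry sizes the array automatically as registers are added, which is what makes the hard-coded SLEEP_SAVE_SIZE define removed above redundant. A small stand-alone sketch of the expansion, with a plain variable (CKENA_demo, hypothetical) standing in for a clock register:

#define SAVE(x)		sleep_save[SLEEP_SAVE_##x] = x
#define RESTORE(x)	x = sleep_save[SLEEP_SAVE_##x]

/* hypothetical stand-in for a register such as CKENA */
static unsigned long CKENA_demo;

enum {
	SLEEP_SAVE_START = 0,
	SLEEP_SAVE_CKENA_demo,
	SLEEP_SAVE_SIZE,	/* grows as entries are added above it */
};

static unsigned long sleep_save[SLEEP_SAVE_SIZE];

static void demo(void)
{
	SAVE(CKENA_demo);	/* expands to: sleep_save[SLEEP_SAVE_CKENA_demo] = CKENA_demo */
	RESTORE(CKENA_demo);	/* expands to: CKENA_demo = sleep_save[SLEEP_SAVE_CKENA_demo] */
}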
@@ -266,6 +270,46 @@ static void pxa3xx_cpu_standby(unsigned int pwrmode)
 	printk("PM: AD2D0SR=%08x ASCR=%08x\n", AD2D0SR, ASCR);
 }
 
+/*
+ * NOTE: currently, the OBM (OEM Boot Module) binary that comes along with
+ * PXA3xx development kits assumes that the resuming process continues
+ * with the address stored within the first 4 bytes of SDRAM. The PSPR
+ * register is used privately by BootROM and OBM, and _must_ be set to
+ * 0x5c014000 for the moment.
+ */
+static void pxa3xx_cpu_pm_suspend(void)
+{
+	volatile unsigned long *p = (volatile void *)0xc0000000;
+	unsigned long saved_data = *p;
+
+	extern void pxa3xx_cpu_suspend(void);
+	extern void pxa3xx_cpu_resume(void);
+
+	/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
+	CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
+	CKENB |= 1 << (CKEN_HSIO2 & 0x1f);
+
+	/* clear and setup wakeup source */
+	AD3SR = ~0;
+	AD3ER = wakeup_src;
+	ASCR = ASCR;
+	ARSR = ARSR;
+
+	PCFR |= (1u << 13);			/* L1_DIS */
+	PCFR &= ~((1u << 12) | (1u << 1));	/* L0_EN | SL_ROD */
+
+	PSPR = 0x5c014000;
+
+	/* overwrite with the resume address */
+	*p = virt_to_phys(pxa3xx_cpu_resume);
+
+	pxa3xx_cpu_suspend();
+
+	*p = saved_data;
+
+	AD3ER = 0;
+}
+
 static void pxa3xx_cpu_pm_enter(suspend_state_t state)
 {
 	/*
@@ -280,6 +324,7 @@ static void pxa3xx_cpu_pm_enter(suspend_state_t state)
 		break;
 
 	case PM_SUSPEND_MEM:
+		pxa3xx_cpu_pm_suspend();
 		break;
 	}
 }
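The suspend routine parks the resume address in the first word of SDRAM because that is where the OBM looks for it; with the usual PXA linear mapping, virtual 0xc0000000 (PAGE_OFFSET) sits over physical 0xa0000000 (the SDRAM base), so virt_to_phys() is just a constant offset. A stand-alone sketch of that relation, assuming those offsets:

/* hypothetical constants mirroring the PXA linear mapping */
#define PAGE_OFFSET_PXA		0xc0000000UL	/* kernel virtual base */
#define PHYS_OFFSET_PXA		0xa0000000UL	/* PXA SDRAM base */

static unsigned long virt_to_phys_sketch(unsigned long vaddr)
{
	return vaddr - PAGE_OFFSET_PXA + PHYS_OFFSET_PXA;
}

/* virt_to_phys_sketch(0xc0000000) == 0xa0000000: the pointer p above
 * really is "the first 4 bytes of SDRAM" that the OBM inspects, and
 * *p = virt_to_phys(pxa3xx_cpu_resume) plants the physical address the
 * boot code jumps to on wakeup; the original word is put back afterwards. */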
diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S
index 14bb4a93ea52..784716eb7fc5 100644
--- a/arch/arm/mach-pxa/sleep.S
+++ b/arch/arm/mach-pxa/sleep.S
@@ -50,6 +50,108 @@ pxa_cpu_save_sp:
 	str	r0, [r1]
 	ldr	pc, [sp], #4
 
+#ifdef CONFIG_PXA3xx
+/*
+ * pxa3xx_cpu_suspend() - forces CPU into sleep state (S2D3C4)
+ *
+ * NOTE: unfortunately, pxa_cpu_save_cp can not be reused here since
+ * the auxiliary control register address is different between pxa3xx
+ * and pxa{25x,27x}
+ */
+
+ENTRY(pxa3xx_cpu_suspend)
+
+#ifndef CONFIG_IWMMXT
+	mra	r2, r3, acc0
+#endif
+	stmfd	sp!, {r2 - r12, lr}	@ save registers on stack
+
+	mrc	p14, 0, r3, c6, c0, 0	@ clock configuration, for turbo mode
+	mrc	p15, 0, r4, c15, c1, 0	@ CP access reg
+	mrc	p15, 0, r5, c13, c0, 0	@ PID
+	mrc	p15, 0, r6, c3, c0, 0	@ domain ID
+	mrc	p15, 0, r7, c2, c0, 0	@ translation table base addr
+	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
+	mrc	p15, 0, r9, c1, c0, 0	@ control reg
+
+	bic	r3, r3, #2		@ clear frequency change bit
+
+	@ store them plus current virtual stack ptr on stack
+	mov	r10, sp
+	stmfd	sp!, {r3 - r10}
+
+	@ store physical address of stack pointer
+	mov	r0, sp
+	bl	sleep_phys_sp
+	ldr	r1, =sleep_save_sp
+	str	r0, [r1]
+
+	@ clean data cache
+	bl	xsc3_flush_kern_cache_all
+
+	mov	r0, #0x06		@ S2D3C4 mode
+	mcr	p14, 0, r0, c7, c0, 0	@ enter sleep
+
+20:	b	20b			@ waiting for sleep
+
+	.data
+	.align 5
+/*
+ * pxa3xx_cpu_resume
+ */
+
+ENTRY(pxa3xx_cpu_resume)
+
+	mov	r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE	@ set SVC, irqs off
+	msr	cpsr_c, r0
+
+	ldr	r0, sleep_save_sp		@ stack phys addr
+	ldmfd	r0, {r3 - r9, sp}		@ CP regs + virt stack ptr
+
+	mov	r1, #0
+	mcr	p15, 0, r1, c7, c7, 0		@ invalidate I & D caches, BTB
+	mcr	p15, 0, r1, c7, c10, 4		@ drain write (&fill) buffer
+	mcr	p15, 0, r1, c7, c5, 4		@ flush prefetch buffer
+	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
+
+	mcr	p14, 0, r3, c6, c0, 0		@ clock configuration, turbo mode.
+	mcr	p15, 0, r4, c15, c1, 0		@ CP access reg
+	mcr	p15, 0, r5, c13, c0, 0		@ PID
+	mcr	p15, 0, r6, c3, c0, 0		@ domain ID
+	mcr	p15, 0, r7, c2, c0, 0		@ translation table base addr
+	mcr	p15, 0, r8, c1, c0, 1		@ auxiliary control reg
+
+	@ temporarily map resume_turn_on_mmu into the page table,
+	@ otherwise prefetch abort occurs after MMU is turned on
+	mov	r1, r7
+	bic	r1, r1, #0x00ff
+	bic	r1, r1, #0x3f00
+	ldr	r2, =0x542e
+
+	adr	r3, resume_turn_on_mmu
+	mov	r3, r3, lsr #20
+	orr	r4, r2, r3, lsl #20
+	ldr	r5, [r1, r3, lsl #2]
+	str	r4, [r1, r3, lsl #2]
+
+	@ Mapping page table address in the page table
+	mov	r6, r1, lsr #20
+	orr	r7, r2, r6, lsl #20
+	ldr	r8, [r1, r6, lsl #2]
+	str	r7, [r1, r6, lsl #2]
+
+	ldr	r2, =pxa3xx_resume_after_mmu	@ absolute virtual address
+	b	resume_turn_on_mmu		@ cache align execution
+
+	.text
+pxa3xx_resume_after_mmu:
+	/* restore the temporary mapping */
+	str	r5, [r1, r3, lsl #2]
+	str	r8, [r1, r6, lsl #2]
+	b	resume_after_mmu
+
+#endif /* CONFIG_PXA3xx */
+
 #ifdef CONFIG_PXA27x
 /*
  * pxa27x_cpu_suspend()
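Before pxa3xx_cpu_resume can turn the MMU back on, it patches two 1MB section entries into the saved translation table: one identity-mapping the section that contains resume_turn_on_mmu, so instruction fetches keep working the instant translation is enabled, and one covering the page table itself; pxa3xx_resume_after_mmu puts both original descriptors back. A rough C rendering of the descriptor patching done in r1-r8 above, treating 0x542e as an opaque first-level section template (its AP/domain/cache bits are not decoded here):

#include <stdint.h>

#define SECTION_TEMPLATE	0x542e		/* the constant loaded into r2 */

/* Patch a 1MB identity-mapped section entry for 'phys' into the first-level
 * table at 'ttb', returning the old descriptor so it can be restored later. */
static uint32_t map_section_identity(uint32_t *ttb, uint32_t phys)
{
	uint32_t idx = phys >> 20;		/* one descriptor per 1MB section */
	uint32_t old = ttb[idx];

	ttb[idx] = (phys & 0xfff00000) | SECTION_TEMPLATE;	/* virt == phys */
	return old;
}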