Diffstat (limited to 'arch/arm/mach-omap2')
-rw-r--r--  arch/arm/mach-omap2/control.c                    |   7
-rw-r--r--  arch/arm/mach-omap2/control.h                    |   6
-rw-r--r--  arch/arm/mach-omap2/gpio.c                       |  34
-rw-r--r--  arch/arm/mach-omap2/include/mach/entry-macro.S   |   3
-rw-r--r--  arch/arm/mach-omap2/omap-smp.c                   |   8
-rw-r--r--  arch/arm/mach-omap2/pm.h                         |  22
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c                     |  80
-rw-r--r--  arch/arm/mach-omap2/serial.c                     |   1
-rw-r--r--  arch/arm/mach-omap2/sleep34xx.S                  | 518
9 files changed, 284 insertions(+), 395 deletions(-)
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index da53ba3917ca..aab884fecc55 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -286,14 +286,15 @@ void omap3_save_scratchpad_contents(void)
         scratchpad_contents.boot_config_ptr = 0x0;
         if (cpu_is_omap3630())
                 scratchpad_contents.public_restore_ptr =
-                        virt_to_phys(get_omap3630_restore_pointer());
+                        virt_to_phys(omap3_restore_3630);
         else if (omap_rev() != OMAP3430_REV_ES3_0 &&
                                 omap_rev() != OMAP3430_REV_ES3_1)
                 scratchpad_contents.public_restore_ptr =
-                        virt_to_phys(get_restore_pointer());
+                        virt_to_phys(omap3_restore);
         else
                 scratchpad_contents.public_restore_ptr =
-                        virt_to_phys(get_es3_restore_pointer());
+                        virt_to_phys(omap3_restore_es3);
+
         if (omap_type() == OMAP2_DEVICE_TYPE_GP)
                 scratchpad_contents.secure_ram_restore_ptr = 0x0;
         else
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index a016c8b59e00..d4ef75d5a382 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -386,9 +386,9 @@ extern void omap4_ctrl_pad_writel(u32 val, u16 offset);
 
 extern void omap3_save_scratchpad_contents(void);
 extern void omap3_clear_scratchpad_contents(void);
-extern u32 *get_restore_pointer(void);
-extern u32 *get_es3_restore_pointer(void);
-extern u32 *get_omap3630_restore_pointer(void);
+extern void omap3_restore(void);
+extern void omap3_restore_es3(void);
+extern void omap3_restore_3630(void);
 extern u32 omap3_arm_context[128];
 extern void omap3_control_save_context(void);
 extern void omap3_control_restore_context(void);
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
index 9529842ae054..2765cdc3152d 100644
--- a/arch/arm/mach-omap2/gpio.c
+++ b/arch/arm/mach-omap2/gpio.c
@@ -61,13 +61,45 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
         pdata->dbck_flag = dev_attr->dbck_flag;
         pdata->virtual_irq_start = IH_GPIO_BASE + 32 * (id - 1);
 
+        pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL);
+        if (!pdata) {
+                pr_err("gpio%d: Memory allocation failed\n", id);
+                return -ENOMEM;
+        }
+
         switch (oh->class->rev) {
         case 0:
         case 1:
                 pdata->bank_type = METHOD_GPIO_24XX;
+                pdata->regs->revision = OMAP24XX_GPIO_REVISION;
+                pdata->regs->direction = OMAP24XX_GPIO_OE;
+                pdata->regs->datain = OMAP24XX_GPIO_DATAIN;
+                pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT;
+                pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT;
+                pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT;
+                pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1;
+                pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2;
+                pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1;
+                pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1;
+                pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1;
+                pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL;
+                pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN;
                 break;
         case 2:
                 pdata->bank_type = METHOD_GPIO_44XX;
+                pdata->regs->revision = OMAP4_GPIO_REVISION;
+                pdata->regs->direction = OMAP4_GPIO_OE;
+                pdata->regs->datain = OMAP4_GPIO_DATAIN;
+                pdata->regs->dataout = OMAP4_GPIO_DATAOUT;
+                pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT;
+                pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT;
+                pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0;
+                pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1;
+                pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0;
+                pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0;
+                pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0;
+                pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME;
+                pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE;
                 break;
         default:
                 WARN(1, "Invalid gpio bank_type\n");
@@ -87,6 +119,8 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
                 return PTR_ERR(od);
         }
 
+        omap_device_disable_idle_on_suspend(od);
+
         gpio_bank_count++;
         return 0;
 }
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index a48690b90990..ceb8b7e593d7 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -165,6 +165,3 @@
 #endif
 
 #endif /* MULTI_OMAP2 */
-
-        .macro  irq_prio_table
-        .endm
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index ecfe93c4b585..ce65e9329c7b 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -125,14 +125,6 @@ void __init smp_init_cpus(void)
 
 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
-        int i;
-
-        /*
-         * Initialise the present map, which describes the set of CPUs
-         * actually populated at the present time.
-         */
-        for (i = 0; i < max_cpus; i++)
-                set_cpu_present(i, true);
 
         /*
          * Initialise the SCU and wake up the secondary core using
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index babac19e3ec1..4e166add2f35 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -72,18 +72,28 @@ extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev);
 #define pm_dbg_update_time(pwrdm, prev) do {} while (0);
 #endif /* CONFIG_PM_DEBUG */
 
+/* 24xx */
 extern void omap24xx_idle_loop_suspend(void);
+extern unsigned int omap24xx_idle_loop_suspend_sz;
 
 extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl,
                                 void __iomem *sdrc_power);
-extern void omap34xx_cpu_suspend(u32 *addr, int save_state);
-extern int save_secure_ram_context(u32 *addr);
-extern void omap3_save_scratchpad_contents(void);
+extern unsigned int omap24xx_cpu_suspend_sz;
 
-extern unsigned int omap24xx_idle_loop_suspend_sz;
+/* 3xxx */
+extern void omap34xx_cpu_suspend(int save_state);
+
+/* omap3_do_wfi function pointer and size, for copy to SRAM */
+extern void omap3_do_wfi(void);
+extern unsigned int omap3_do_wfi_sz;
+/* ... and its pointer from SRAM after copy */
+extern void (*omap3_do_wfi_sram)(void);
+
+/* save_secure_ram_context function pointer and size, for copy to SRAM */
+extern int save_secure_ram_context(u32 *addr);
 extern unsigned int save_secure_ram_context_sz;
-extern unsigned int omap24xx_cpu_suspend_sz;
-extern unsigned int omap34xx_cpu_suspend_sz;
+
+extern void omap3_save_scratchpad_contents(void);
 
 #define PM_RTA_ERRATUM_i608             (1 << 0)
 #define PM_SDRC_WAKEUP_ERRATUM_i583     (1 << 1)
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 96a76245284c..7255d9bce868 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -31,6 +31,8 @@
 #include <linux/console.h>
 #include <trace/events/power.h>
 
+#include <asm/suspend.h>
+
 #include <plat/sram.h>
 #include "clockdomain.h"
 #include "powerdomain.h"
@@ -40,8 +42,6 @@
 #include <plat/gpmc.h>
 #include <plat/dma.h>
 
-#include <asm/tlbflush.h>
-
 #include "cm2xxx_3xxx.h"
 #include "cm-regbits-34xx.h"
 #include "prm-regbits-34xx.h"
@@ -64,11 +64,6 @@ static inline bool is_suspending(void)
 }
 #endif
 
-/* Scratchpad offsets */
-#define OMAP343X_TABLE_ADDRESS_OFFSET           0xc4
-#define OMAP343X_TABLE_VALUE_OFFSET             0xc0
-#define OMAP343X_CONTROL_REG_VALUE_OFFSET       0xc8
-
 /* pm34xx errata defined in pm.h */
 u16 pm34xx_errata;
 
@@ -83,9 +78,8 @@ struct power_state {
 
 static LIST_HEAD(pwrst_list);
 
-static void (*_omap_sram_idle)(u32 *addr, int save_state);
-
 static int (*_omap_save_secure_sram)(u32 *addr);
+void (*omap3_do_wfi_sram)(void);
 
 static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
 static struct powerdomain *core_pwrdm, *per_pwrdm;
@@ -312,28 +306,25 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
         return IRQ_HANDLED;
 }
 
-/* Function to restore the table entry that was modified for enabling MMU */
-static void restore_table_entry(void)
+static void omap34xx_save_context(u32 *save)
 {
-        void __iomem *scratchpad_address;
-        u32 previous_value, control_reg_value;
-        u32 *address;
+        u32 val;
 
-        scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);
+        /* Read Auxiliary Control Register */
+        asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
+        *save++ = 1;
+        *save++ = val;
 
-        /* Get address of entry that was modified */
-        address = (u32 *)__raw_readl(scratchpad_address +
-                                OMAP343X_TABLE_ADDRESS_OFFSET);
-        /* Get the previous value which needs to be restored */
-        previous_value = __raw_readl(scratchpad_address +
-                                OMAP343X_TABLE_VALUE_OFFSET);
-        address = __va(address);
-        *address = previous_value;
-        flush_tlb_all();
-        control_reg_value = __raw_readl(scratchpad_address
-                                + OMAP343X_CONTROL_REG_VALUE_OFFSET);
-        /* This will enable caches and prediction */
-        set_cr(control_reg_value);
+        /* Read L2 AUX ctrl register */
+        asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
+        *save++ = 1;
+        *save++ = val;
+}
+
+static int omap34xx_do_sram_idle(unsigned long save_state)
+{
+        omap34xx_cpu_suspend(save_state);
+        return 0;
 }
 
 void omap_sram_idle(void)
@@ -352,9 +343,6 @@ void omap_sram_idle(void)
         int core_prev_state, per_prev_state;
         u32 sdrc_pwr = 0;
 
-        if (!_omap_sram_idle)
-                return;
-
         pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
         pwrdm_clear_all_prev_pwrst(neon_pwrdm);
         pwrdm_clear_all_prev_pwrst(core_pwrdm);
@@ -432,12 +420,16 @@ void omap_sram_idle(void)
                 sdrc_pwr = sdrc_read_reg(SDRC_POWER);
 
         /*
-         * omap3_arm_context is the location where ARM registers
-         * get saved. The restore path then reads from this
-         * location and restores them back.
+         * omap3_arm_context is the location where some ARM context
+         * get saved. The rest is placed on the stack, and restored
+         * from there before resuming.
          */
-        _omap_sram_idle(omap3_arm_context, save_state);
-        cpu_init();
+        if (save_state)
+                omap34xx_save_context(omap3_arm_context);
+        if (save_state == 1 || save_state == 3)
+                cpu_suspend(save_state, omap34xx_do_sram_idle);
+        else
+                omap34xx_do_sram_idle(save_state);
 
         /* Restore normal SDRC POWER settings */
         if (omap_rev() >= OMAP3430_REV_ES3_0 &&
@@ -445,10 +437,6 @@ void omap_sram_idle(void)
             core_next_state == PWRDM_POWER_OFF)
                 sdrc_write_reg(sdrc_pwr, SDRC_POWER);
 
-        /* Restore table entry modified during MMU restoration */
-        if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
-                restore_table_entry();
-
         /* CORE */
         if (core_next_state < PWRDM_POWER_ON) {
                 core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
@@ -846,10 +834,17 @@ static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
         return 0;
 }
 
+/*
+ * Push functions to SRAM
+ *
+ * The minimum set of functions is pushed to SRAM for execution:
+ * - omap3_do_wfi for erratum i581 WA,
+ * - save_secure_ram_context for security extensions.
+ */
 void omap_push_sram_idle(void)
 {
-        _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
-                                        omap34xx_cpu_suspend_sz);
+        omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
+
         if (omap_type() != OMAP2_DEVICE_TYPE_GP)
                 _omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
                                         save_secure_ram_context_sz);
@@ -914,7 +909,6 @@ static int __init omap3_pm_init(void)
         per_clkdm = clkdm_lookup("per_clkdm");
         core_clkdm = clkdm_lookup("core_clkdm");
 
-        omap_push_sram_idle();
 #ifdef CONFIG_SUSPEND
         suspend_set_ops(&omap_pm_ops);
 #endif /* CONFIG_SUSPEND */
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 1ac361b7b8cb..466fc722fa0f 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -805,6 +805,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
         WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n",
                         name, oh->name);
 
+        omap_device_disable_idle_on_suspend(od);
         oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
 
         uart->irq = oh->mpu_irqs[0].irq;
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index 63f10669571a..f2ea1bd1c691 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -74,46 +74,6 @@
  * API functions
  */
 
-/*
- * The "get_*restore_pointer" functions are used to provide a
- * physical restore address where the ROM code jumps while waking
- * up from MPU OFF/OSWR state.
- * The restore pointer is stored into the scratchpad.
- */
-
-        .text
-/* Function call to get the restore pointer for resume from OFF */
-ENTRY(get_restore_pointer)
-        stmfd   sp!, {lr}       @ save registers on stack
-        adr     r0, restore
-        ldmfd   sp!, {pc}       @ restore regs and return
-ENDPROC(get_restore_pointer)
-        .align
-ENTRY(get_restore_pointer_sz)
-        .word   . - get_restore_pointer
-
-        .text
-/* Function call to get the restore pointer for 3630 resume from OFF */
-ENTRY(get_omap3630_restore_pointer)
-        stmfd   sp!, {lr}       @ save registers on stack
-        adr     r0, restore_3630
-        ldmfd   sp!, {pc}       @ restore regs and return
-ENDPROC(get_omap3630_restore_pointer)
-        .align
-ENTRY(get_omap3630_restore_pointer_sz)
-        .word   . - get_omap3630_restore_pointer
-
-        .text
-/* Function call to get the restore pointer for ES3 to resume from OFF */
-ENTRY(get_es3_restore_pointer)
-        stmfd   sp!, {lr}       @ save registers on stack
-        adr     r0, restore_es3
-        ldmfd   sp!, {pc}       @ restore regs and return
-ENDPROC(get_es3_restore_pointer)
-        .align
-ENTRY(get_es3_restore_pointer_sz)
-        .word   . - get_es3_restore_pointer
-
         .text
 /*
  * L2 cache needs to be toggled for stable OFF mode functionality on 3630.
@@ -133,7 +93,7 @@ ENDPROC(enable_omap3630_toggle_l2_on_restore)
 /* Function to call rom code to save secure ram context */
         .align  3
 ENTRY(save_secure_ram_context)
-        stmfd   sp!, {r1-r12, lr}       @ save registers on stack
+        stmfd   sp!, {r4 - r11, lr}     @ save registers on stack
         adr     r3, api_params          @ r3 points to parameters
         str     r0, [r3,#0x4]           @ r0 has sdram address
         ldr     r12, high_mask
@@ -152,7 +112,7 @@ ENTRY(save_secure_ram_context)
         nop
         nop
         nop
-        ldmfd   sp!, {r1-r12, pc}
+        ldmfd   sp!, {r4 - r11, pc}
         .align
 sram_phy_addr_mask:
         .word   SRAM_BASE_P
@@ -179,69 +139,38 @@ ENTRY(save_secure_ram_context_sz)
  *
  *
  * Notes:
- * - this code gets copied to internal SRAM at boot and after wake-up
- *   from OFF mode. The execution pointer in SRAM is _omap_sram_idle.
+ * - only the minimum set of functions gets copied to internal SRAM at boot
+ *   and after wake-up from OFF mode, cf. omap_push_sram_idle. The function
+ *   pointers in SDRAM or SRAM are called depending on the desired low power
+ *   target state.
  * - when the OMAP wakes up it continues at different execution points
  *   depending on the low power mode (non-OFF vs OFF modes),
  *   cf. 'Resume path for xxx mode' comments.
  */
         .align  3
 ENTRY(omap34xx_cpu_suspend)
-        stmfd   sp!, {r0-r12, lr}       @ save registers on stack
+        stmfd   sp!, {r4 - r11, lr}     @ save registers on stack
 
         /*
-         * r0 contains CPU context save/restore pointer in sdram
-         * r1 contains information about saving context:
+         * r0 contains information about saving context:
          *   0 - No context lost
          *   1 - Only L1 and logic lost
          *   2 - Only L2 lost (Even L1 is retained we clean it along with L2)
          *   3 - Both L1 and L2 lost and logic lost
          */
 
-        /* Directly jump to WFI is the context save is not required */
-        cmp     r1, #0x0
-        beq     omap3_do_wfi
+        /*
+         * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi)
+         * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram)
+         */
+        ldr     r4, omap3_do_wfi_sram_addr
+        ldr     r5, [r4]
+        cmp     r0, #0x0        @ If no context save required,
+        bxeq    r5              @ jump to the WFI code in SRAM
+
 
         /* Otherwise fall through to the save context code */
 save_context_wfi:
-        mov     r8, r0                  @ Store SDRAM address in r8
-        mrc     p15, 0, r5, c1, c0, 1   @ Read Auxiliary Control Register
-        mov     r4, #0x1                @ Number of parameters for restore call
-        stmia   r8!, {r4-r5}            @ Push parameters for restore call
-        mrc     p15, 1, r5, c9, c0, 2   @ Read L2 AUX ctrl register
-        stmia   r8!, {r4-r5}            @ Push parameters for restore call
-
-        /* Check what that target sleep state is from r1 */
-        cmp     r1, #0x2                @ Only L2 lost, no need to save context
-        beq     clean_caches
-
-l1_logic_lost:
-        mov     r4, sp                  @ Store sp
-        mrs     r5, spsr                @ Store spsr
-        mov     r6, lr                  @ Store lr
-        stmia   r8!, {r4-r6}
-
-        mrc     p15, 0, r4, c1, c0, 2   @ Coprocessor access control register
-        mrc     p15, 0, r5, c2, c0, 0   @ TTBR0
-        mrc     p15, 0, r6, c2, c0, 1   @ TTBR1
-        mrc     p15, 0, r7, c2, c0, 2   @ TTBCR
-        stmia   r8!, {r4-r7}
-
-        mrc     p15, 0, r4, c3, c0, 0   @ Domain access Control Register
-        mrc     p15, 0, r5, c10, c2, 0  @ PRRR
-        mrc     p15, 0, r6, c10, c2, 1  @ NMRR
-        stmia   r8!,{r4-r6}
-
-        mrc     p15, 0, r4, c13, c0, 1  @ Context ID
-        mrc     p15, 0, r5, c13, c0, 2  @ User r/w thread and process ID
-        mrc     p15, 0, r6, c12, c0, 0  @ Secure or NS vector base address
-        mrs     r7, cpsr                @ Store current cpsr
-        stmia   r8!, {r4-r7}
-
-        mrc     p15, 0, r4, c1, c0, 0   @ save control register
-        stmia   r8!, {r4}
-
-clean_caches:
         /*
          * jump out to kernel flush routine
          *  - reuse that code is better
@@ -284,7 +213,32 @@ clean_caches:
         THUMB(  nop     )
         .arm
 
-omap3_do_wfi:
+        b       omap3_do_wfi
+
+/*
+ * Local variables
+ */
+omap3_do_wfi_sram_addr:
+        .word   omap3_do_wfi_sram
+kernel_flush:
+        .word   v7_flush_dcache_all
+
+/* ===================================
+ * == WFI instruction => Enter idle ==
+ * ===================================
+ */
+
+/*
+ * Do WFI instruction
+ * Includes the resume path for non-OFF modes
+ *
+ * This code gets copied to internal SRAM and is accessible
+ * from both SDRAM and SRAM:
+ * - executed from SRAM for non-off modes (omap3_do_wfi_sram),
+ * - executed from SDRAM for OFF mode (omap3_do_wfi).
+ */
+        .align  3
+ENTRY(omap3_do_wfi)
         ldr     r4, sdrc_power          @ read the SDRC_POWER register
         ldr     r5, [r4]                @ read the contents of SDRC_POWER
         orr     r5, r5, #0x40           @ enable self refresh on idle req
@@ -316,8 +270,86 @@ omap3_do_wfi:
         nop
         nop
         nop
-        bl      wait_sdrc_ok
 
+/*
+ * This function implements the erratum ID i581 WA:
+ * SDRC state restore before accessing the SDRAM
+ *
+ * Only used at return from non-OFF mode. For OFF
+ * mode the ROM code configures the SDRC and
+ * the DPLL before calling the restore code directly
+ * from DDR.
+ */
+
+/* Make sure SDRC accesses are ok */
+wait_sdrc_ok:
+
+/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
+        ldr     r4, cm_idlest_ckgen
+wait_dpll3_lock:
+        ldr     r5, [r4]
+        tst     r5, #1
+        beq     wait_dpll3_lock
+
+        ldr     r4, cm_idlest1_core
+wait_sdrc_ready:
+        ldr     r5, [r4]
+        tst     r5, #0x2
+        bne     wait_sdrc_ready
+        /* allow DLL powerdown upon hw idle req */
+        ldr     r4, sdrc_power
+        ldr     r5, [r4]
+        bic     r5, r5, #0x40
+        str     r5, [r4]
+
+/*
+ * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
+ * base instead.
+ * Be careful not to clobber r7 when maintaing this code.
+ */
+
+is_dll_in_lock_mode:
+        /* Is dll in lock mode? */
+        ldr     r4, sdrc_dlla_ctrl
+        ldr     r5, [r4]
+        tst     r5, #0x4
+        bne     exit_nonoff_modes       @ Return if locked
+        /* wait till dll locks */
+        adr     r7, kick_counter
+wait_dll_lock_timed:
+        ldr     r4, wait_dll_lock_counter
+        add     r4, r4, #1
+        str     r4, [r7, #wait_dll_lock_counter - kick_counter]
+        ldr     r4, sdrc_dlla_status
+        /* Wait 20uS for lock */
+        mov     r6, #8
+wait_dll_lock:
+        subs    r6, r6, #0x1
+        beq     kick_dll
+        ldr     r5, [r4]
+        and     r5, r5, #0x4
+        cmp     r5, #0x4
+        bne     wait_dll_lock
+        b       exit_nonoff_modes       @ Return when locked
+
+        /* disable/reenable DLL if not locked */
+kick_dll:
+        ldr     r4, sdrc_dlla_ctrl
+        ldr     r5, [r4]
+        mov     r6, r5
+        bic     r6, #(1<<3)             @ disable dll
+        str     r6, [r4]
+        dsb
+        orr     r6, r6, #(1<<3)         @ enable dll
+        str     r6, [r4]
+        dsb
+        ldr     r4, kick_counter
+        add     r4, r4, #1
+        str     r4, [r7]                @ kick_counter
+        b       wait_dll_lock_timed
+
+exit_nonoff_modes:
+        /* Re-enable C-bit if needed */
         mrc     p15, 0, r0, c1, c0, 0
         tst     r0, #(1 << 2)           @ Check C bit enabled?
         orreq   r0, r0, #(1 << 2)       @ Enable the C bit if cleared
@@ -329,7 +361,32 @@ omap3_do_wfi:
  * == Exit point from non-OFF modes ==
  * ===================================
  */
-        ldmfd   sp!, {r0-r12, pc}       @ restore regs and return
+        ldmfd   sp!, {r4 - r11, pc}     @ restore regs and return
+
+/*
+ * Local variables
+ */
+sdrc_power:
+        .word   SDRC_POWER_V
+cm_idlest1_core:
+        .word   CM_IDLEST1_CORE_V
+cm_idlest_ckgen:
+        .word   CM_IDLEST_CKGEN_V
+sdrc_dlla_status:
+        .word   SDRC_DLLA_STATUS_V
+sdrc_dlla_ctrl:
+        .word   SDRC_DLLA_CTRL_V
+        /*
+         * When exporting to userspace while the counters are in SRAM,
+         * these 2 words need to be at the end to facilitate retrival!
+         */
+kick_counter:
+        .word   0
+wait_dll_lock_counter:
+        .word   0
+
+ENTRY(omap3_do_wfi_sz)
+        .word   . - omap3_do_wfi
 
 
 /*
@@ -346,13 +403,17 @@ omap3_do_wfi:
  * restore_es3: applies to 34xx >= ES3.0
  * restore_3630: applies to 36xx
  * restore: common code for 3xxx
+ *
+ * Note: when back from CORE and MPU OFF mode we are running
+ *  from SDRAM, without MMU, without the caches and prediction.
+ *  Also the SRAM content has been cleared.
  */
-restore_es3:
+ENTRY(omap3_restore_es3)
         ldr     r5, pm_prepwstst_core_p
         ldr     r4, [r5]
         and     r4, r4, #0x3
         cmp     r4, #0x0        @ Check if previous power state of CORE is OFF
-        bne     restore
+        bne     omap3_restore   @ Fall through to OMAP3 common code
         adr     r0, es3_sdrc_fix
         ldr     r1, sram_base
         ldr     r2, es3_sdrc_fix_sz
@@ -364,35 +425,32 @@ copy_to_sram:
         bne     copy_to_sram
         ldr     r1, sram_base
         blx     r1
-        b       restore
+        b       omap3_restore   @ Fall through to OMAP3 common code
+ENDPROC(omap3_restore_es3)
 
-restore_3630:
+ENTRY(omap3_restore_3630)
         ldr     r1, pm_prepwstst_core_p
         ldr     r2, [r1]
         and     r2, r2, #0x3
         cmp     r2, #0x0        @ Check if previous power state of CORE is OFF
-        bne     restore
+        bne     omap3_restore   @ Fall through to OMAP3 common code
         /* Disable RTA before giving control */
         ldr     r1, control_mem_rta
         mov     r2, #OMAP36XX_RTA_DISABLE
         str     r2, [r1]
+ENDPROC(omap3_restore_3630)
 
         /* Fall through to common code for the remaining logic */
 
-restore:
+ENTRY(omap3_restore)
         /*
-         * Check what was the reason for mpu reset and store the reason in r9:
-         *  0 - No context lost
-         *  1 - Only L1 and logic lost
-         *  2 - Only L2 lost - In this case, we wont be here
-         *  3 - Both L1 and L2 lost
+         * Read the pwstctrl register to check the reason for mpu reset.
+         * This tells us what was lost.
          */
         ldr     r1, pm_pwstctrl_mpu
         ldr     r2, [r1]
         and     r2, r2, #0x3
         cmp     r2, #0x0        @ Check if target power state was OFF or RET
-        moveq   r9, #0x3        @ MPU OFF => L1 and L2 lost
-        movne   r9, #0x1        @ Only L1 and L2 lost => avoid L2 invalidation
         bne     logic_l1_restore
 
         ldr     r0, l2dis_3630
@@ -471,115 +529,39 @@ logic_l1_restore:
         orr     r1, r1, #2      @ re-enable L2 cache
         mcr     p15, 0, r1, c1, c0, 1
 skipl2reen:
-        mov     r1, #0
-        /*
-         * Invalidate all instruction caches to PoU
-         * and flush branch target cache
-         */
-        mcr     p15, 0, r1, c7, c5, 0
 
-        ldr     r4, scratchpad_base
-        ldr     r3, [r4,#0xBC]
-        adds    r3, r3, #16
+        /* Now branch to the common CPU resume function */
+        b       cpu_resume
+ENDPROC(omap3_restore)
 
-        ldmia   r3!, {r4-r6}
-        mov     sp, r4          @ Restore sp
-        msr     spsr_cxsf, r5   @ Restore spsr
-        mov     lr, r6          @ Restore lr
-
-        ldmia   r3!, {r4-r7}
-        mcr     p15, 0, r4, c1, c0, 2   @ Coprocessor access Control Register
-        mcr     p15, 0, r5, c2, c0, 0   @ TTBR0
-        mcr     p15, 0, r6, c2, c0, 1   @ TTBR1
-        mcr     p15, 0, r7, c2, c0, 2   @ TTBCR
-
-        ldmia   r3!,{r4-r6}
-        mcr     p15, 0, r4, c3, c0, 0   @ Domain access Control Register
-        mcr     p15, 0, r5, c10, c2, 0  @ PRRR
-        mcr     p15, 0, r6, c10, c2, 1  @ NMRR
-
-
-        ldmia   r3!,{r4-r7}
-        mcr     p15, 0, r4, c13, c0, 1  @ Context ID
-        mcr     p15, 0, r5, c13, c0, 2  @ User r/w thread and process ID
-        mrc     p15, 0, r6, c12, c0, 0  @ Secure or NS vector base address
-        msr     cpsr, r7                @ store cpsr
-
-        /* Enabling MMU here */
-        mrc     p15, 0, r7, c2, c0, 2   @ Read TTBRControl
-        /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
-        and     r7, #0x7
-        cmp     r7, #0x0
-        beq     usettbr0
-ttbr_error:
-        /*
-         * More work needs to be done to support N[0:2] value other than 0
-         * So looping here so that the error can be detected
-         */
-        b       ttbr_error
-usettbr0:
-        mrc     p15, 0, r2, c2, c0, 0
-        ldr     r5, ttbrbit_mask
-        and     r2, r5
-        mov     r4, pc
-        ldr     r5, table_index_mask
-        and     r4, r5          @ r4 = 31 to 20 bits of pc
-        /* Extract the value to be written to table entry */
-        ldr     r1, table_entry
-        /* r1 has the value to be written to table entry*/
-        add     r1, r1, r4
-        /* Getting the address of table entry to modify */
-        lsr     r4, #18
-        /* r2 has the location which needs to be modified */
-        add     r2, r4
-        /* Storing previous entry of location being modified */
-        ldr     r5, scratchpad_base
-        ldr     r4, [r2]
-        str     r4, [r5, #0xC0]
-        /* Modify the table entry */
-        str     r1, [r2]
-        /*
-         * Storing address of entry being modified
-         * - will be restored after enabling MMU
-         */
-        ldr     r5, scratchpad_base
-        str     r2, [r5, #0xC4]
-
-        mov     r0, #0
-        mcr     p15, 0, r0, c7, c5, 4   @ Flush prefetch buffer
-        mcr     p15, 0, r0, c7, c5, 6   @ Invalidate branch predictor array
-        mcr     p15, 0, r0, c8, c5, 0   @ Invalidate instruction TLB
-        mcr     p15, 0, r0, c8, c6, 0   @ Invalidate data TLB
-        /*
-         * Restore control register. This enables the MMU.
-         * The caches and prediction are not enabled here, they
-         * will be enabled after restoring the MMU table entry.
-         */
-        ldmia   r3!, {r4}
-        /* Store previous value of control register in scratchpad */
-        str     r4, [r5, #0xC8]
-        ldr     r2, cache_pred_disable_mask
-        and     r4, r2
-        mcr     p15, 0, r4, c1, c0, 0
-        dsb
-        isb
-        ldr     r0, =restoremmu_on
-        bx      r0
+        .ltorg
 
 /*
- * ==============================
- * == Exit point from OFF mode ==
- * ==============================
+ * Local variables
  */
-restoremmu_on:
-        ldmfd   sp!, {r0-r12, pc}       @ restore regs and return
-
+pm_prepwstst_core_p:
+        .word   PM_PREPWSTST_CORE_P
+pm_pwstctrl_mpu:
+        .word   PM_PWSTCTRL_MPU_P
+scratchpad_base:
+        .word   SCRATCHPAD_BASE_P
+sram_base:
+        .word   SRAM_BASE_P + 0x8000
+control_stat:
+        .word   CONTROL_STAT
+control_mem_rta:
+        .word   CONTROL_MEM_RTA_CTRL
+l2dis_3630:
+        .word   0
 
 /*
  * Internal functions
  */
 
-/* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */
+/*
+ * This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0
+ * Copied to and run from SRAM in order to reconfigure the SDRC parameters.
+ */
         .text
         .align  3
 ENTRY(es3_sdrc_fix)
@@ -609,6 +591,9 @@ ENTRY(es3_sdrc_fix)
         str     r5, [r4]        @ kick off refreshes
         bx      lr
 
+/*
+ * Local variables
+ */
         .align
 sdrc_syscfg:
         .word   SDRC_SYSCONFIG_P
@@ -627,128 +612,3 @@ sdrc_manual_1:
 ENDPROC(es3_sdrc_fix)
 ENTRY(es3_sdrc_fix_sz)
         .word   . - es3_sdrc_fix
-
-/*
- * This function implements the erratum ID i581 WA:
- * SDRC state restore before accessing the SDRAM
- *
- * Only used at return from non-OFF mode. For OFF
- * mode the ROM code configures the SDRC and
- * the DPLL before calling the restore code directly
- * from DDR.
- */
-
-/* Make sure SDRC accesses are ok */
-wait_sdrc_ok:
-
-/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
-        ldr     r4, cm_idlest_ckgen
-wait_dpll3_lock:
-        ldr     r5, [r4]
-        tst     r5, #1
-        beq     wait_dpll3_lock
-
-        ldr     r4, cm_idlest1_core
-wait_sdrc_ready:
-        ldr     r5, [r4]
-        tst     r5, #0x2
-        bne     wait_sdrc_ready
-        /* allow DLL powerdown upon hw idle req */
-        ldr     r4, sdrc_power
-        ldr     r5, [r4]
-        bic     r5, r5, #0x40
-        str     r5, [r4]
-
-/*
- * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
- * base instead.
- * Be careful not to clobber r7 when maintaing this code.
- */
-
-is_dll_in_lock_mode:
-        /* Is dll in lock mode? */
-        ldr     r4, sdrc_dlla_ctrl
-        ldr     r5, [r4]
-        tst     r5, #0x4
-        bxne    lr                      @ Return if locked
-        /* wait till dll locks */
-        adr     r7, kick_counter
-wait_dll_lock_timed:
-        ldr     r4, wait_dll_lock_counter
-        add     r4, r4, #1
-        str     r4, [r7, #wait_dll_lock_counter - kick_counter]
-        ldr     r4, sdrc_dlla_status
-        /* Wait 20uS for lock */
-        mov     r6, #8
-wait_dll_lock:
-        subs    r6, r6, #0x1
-        beq     kick_dll
-        ldr     r5, [r4]
-        and     r5, r5, #0x4
-        cmp     r5, #0x4
-        bne     wait_dll_lock
-        bx      lr                      @ Return when locked
-
-        /* disable/reenable DLL if not locked */
-kick_dll:
-        ldr     r4, sdrc_dlla_ctrl
-        ldr     r5, [r4]
-        mov     r6, r5
-        bic     r6, #(1<<3)             @ disable dll
-        str     r6, [r4]
-        dsb
-        orr     r6, r6, #(1<<3)         @ enable dll
-        str     r6, [r4]
-        dsb
-        ldr     r4, kick_counter
-        add     r4, r4, #1
-        str     r4, [r7]                @ kick_counter
-        b       wait_dll_lock_timed
-
-        .align
-cm_idlest1_core:
-        .word   CM_IDLEST1_CORE_V
-cm_idlest_ckgen:
-        .word   CM_IDLEST_CKGEN_V
-sdrc_dlla_status:
-        .word   SDRC_DLLA_STATUS_V
-sdrc_dlla_ctrl:
-        .word   SDRC_DLLA_CTRL_V
-pm_prepwstst_core_p:
-        .word   PM_PREPWSTST_CORE_P
-pm_pwstctrl_mpu:
-        .word   PM_PWSTCTRL_MPU_P
-scratchpad_base:
-        .word   SCRATCHPAD_BASE_P
-sram_base:
-        .word   SRAM_BASE_P + 0x8000
-sdrc_power:
-        .word   SDRC_POWER_V
-ttbrbit_mask:
-        .word   0xFFFFC000
-table_index_mask:
-        .word   0xFFF00000
-table_entry:
-        .word   0x00000C02
-cache_pred_disable_mask:
-        .word   0xFFFFE7FB
-control_stat:
-        .word   CONTROL_STAT
-control_mem_rta:
-        .word   CONTROL_MEM_RTA_CTRL
-kernel_flush:
-        .word   v7_flush_dcache_all
-l2dis_3630:
-        .word   0
-        /*
-         * When exporting to userspace while the counters are in SRAM,
-         * these 2 words need to be at the end to facilitate retrival!
-         */
-kick_counter:
-        .word   0
-wait_dll_lock_counter:
-        .word   0
-ENDPROC(omap34xx_cpu_suspend)
-
-ENTRY(omap34xx_cpu_suspend_sz)
-        .word   . - omap34xx_cpu_suspend