Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/mach-omap2/Kconfig        |   1
-rw-r--r--  arch/arm/mach-omap2/cpuidle44xx.c  | 112
2 files changed, 67 insertions(+), 46 deletions(-)
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 4cf5142f22cc..cc83f5e13d5c 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -55,6 +55,7 @@ config ARCH_OMAP4
 	select PM_OPP if PM
 	select USB_ARCH_HAS_EHCI if USB_SUPPORT
 	select ARM_CPU_SUSPEND if PM
+	select ARCH_NEEDS_CPU_IDLE_COUPLED
 
 comment "OMAP Core Type"
 	depends on ARCH_OMAP2
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index be1617ca84bd..25655eb69408 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -21,6 +21,7 @@
 #include "common.h"
 #include "pm.h"
 #include "prm.h"
+#include "clockdomain.h"
 
 #ifdef CONFIG_CPU_IDLE
 
@@ -49,10 +50,11 @@ static struct omap4_idle_statedata omap4_idle_data[] = {
 	},
 };
 
-static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
+static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
+static struct clockdomain *cpu_clkdm[NR_CPUS];
 
 /**
- * omap4_enter_idle - Programs OMAP4 to enter the specified state
+ * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions
  * @dev: cpuidle device
  * @drv: cpuidle driver
  * @index: the index of state to be entered
@@ -61,60 +63,71 @@ static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
  * specified low power state selected by the governor.
  * Returns the amount of time spent in the low power state.
  */
-static int omap4_enter_idle(struct cpuidle_device *dev,
+static int omap4_enter_idle_simple(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv,
+			int index)
+{
+	local_fiq_disable();
+	omap_do_wfi();
+	local_fiq_enable();
+
+	return index;
+}
+
+static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			int index)
 {
 	struct omap4_idle_statedata *cx = &omap4_idle_data[index];
-	u32 cpu1_state;
 	int cpu_id = smp_processor_id();
 
 	local_fiq_disable();
 
 	/*
-	 * CPU0 has to stay ON (i.e in C1) until CPU1 is OFF state.
+	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
 	 * This is necessary to honour hardware recommondation
 	 * of triggeing all the possible low power modes once CPU1 is
 	 * out of coherency and in OFF mode.
-	 * Update dev->last_state so that governor stats reflects right
-	 * data.
 	 */
-	cpu1_state = pwrdm_read_pwrst(cpu1_pd);
-	if (cpu1_state != PWRDM_POWER_OFF) {
-		index = drv->safe_state_index;
-		cx = &omap4_idle_data[index];
+	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF)
+			cpu_relax();
 	}
 
-	if (index > 0)
-		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
 
 	/*
 	 * Call idle CPU PM enter notifier chain so that
 	 * VFP and per CPU interrupt context is saved.
 	 */
-	if (cx->cpu_state == PWRDM_POWER_OFF)
-		cpu_pm_enter();
-
-	pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
-	omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
-
-	/*
-	 * Call idle CPU cluster PM enter notifier chain
-	 * to save GIC and wakeupgen context.
-	 */
-	if ((cx->mpu_state == PWRDM_POWER_RET) &&
-		(cx->mpu_logic_state == PWRDM_POWER_OFF))
-			cpu_cluster_pm_enter();
+	cpu_pm_enter();
+
+	if (dev->cpu == 0) {
+		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+
+		/*
+		 * Call idle CPU cluster PM enter notifier chain
+		 * to save GIC and wakeupgen context.
+		 */
+		if ((cx->mpu_state == PWRDM_POWER_RET) &&
+			(cx->mpu_logic_state == PWRDM_POWER_OFF))
+				cpu_cluster_pm_enter();
+	}
 
 	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
 
+	/* Wakeup CPU1 only if it is not offlined */
+	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+		clkdm_wakeup(cpu_clkdm[1]);
+		clkdm_allow_idle(cpu_clkdm[1]);
+	}
+
 	/*
 	 * Call idle CPU PM exit notifier chain to restore
-	 * VFP and per CPU IRQ context. Only CPU0 state is
-	 * considered since CPU1 is managed by CPU hotplug.
+	 * VFP and per CPU IRQ context.
 	 */
-	if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF)
-		cpu_pm_exit();
+	cpu_pm_exit();
 
 	/*
 	 * Call idle CPU cluster PM exit notifier chain
@@ -123,8 +136,7 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
 	if (omap4_mpuss_read_prev_context_state())
 		cpu_cluster_pm_exit();
 
-	if (index > 0)
-		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
 
 	local_fiq_enable();
 
@@ -143,7 +155,7 @@ struct cpuidle_driver omap4_idle_driver = {
 			.exit_latency = 2 + 2,
 			.target_residency = 5,
 			.flags = CPUIDLE_FLAG_TIME_VALID,
-			.enter = omap4_enter_idle,
+			.enter = omap4_enter_idle_simple,
 			.name = "C1",
 			.desc = "MPUSS ON"
 		},
@@ -151,8 +163,8 @@ struct cpuidle_driver omap4_idle_driver = {
 			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
 			.exit_latency = 328 + 440,
 			.target_residency = 960,
-			.flags = CPUIDLE_FLAG_TIME_VALID,
-			.enter = omap4_enter_idle,
+			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+			.enter = omap4_enter_idle_coupled,
 			.name = "C2",
 			.desc = "MPUSS CSWR",
 		},
@@ -160,8 +172,8 @@ struct cpuidle_driver omap4_idle_driver = {
 			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
 			.exit_latency = 460 + 518,
 			.target_residency = 1100,
-			.flags = CPUIDLE_FLAG_TIME_VALID,
-			.enter = omap4_enter_idle,
+			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+			.enter = omap4_enter_idle_coupled,
 			.name = "C3",
 			.desc = "MPUSS OSWR",
 		},
@@ -182,19 +194,27 @@ int __init omap4_idle_init(void)
 	unsigned int cpu_id = 0;
 
 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
-	cpu0_pd = pwrdm_lookup("cpu0_pwrdm");
-	cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
-	if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd))
+	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
+	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
+	if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
+		return -ENODEV;
+
+	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
+	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
+	if (!cpu_clkdm[0] || !cpu_clkdm[1])
 		return -ENODEV;
 
-	dev = &per_cpu(omap4_idle_dev, cpu_id);
-	dev->cpu = cpu_id;
+	for_each_cpu(cpu_id, cpu_online_mask) {
+		dev = &per_cpu(omap4_idle_dev, cpu_id);
+		dev->cpu = cpu_id;
+		dev->coupled_cpus = *cpu_online_mask;
 
-	cpuidle_register_driver(&omap4_idle_driver);
+		cpuidle_register_driver(&omap4_idle_driver);
 
-	if (cpuidle_register_device(dev)) {
-		pr_err("%s: CPUidle register device failed\n", __func__);
-		return -EIO;
+		if (cpuidle_register_device(dev)) {
+			pr_err("%s: CPUidle register failed\n", __func__);
+			return -EIO;
+		}
 	}
 
 	return 0;