author    Kevin Hilman <khilman@ti.com>    2012-03-14 20:26:17 -0400
committer Kevin Hilman <khilman@ti.com>    2012-07-25 19:06:04 -0400
commit    5b4d5bcc68940497722d98d99abee72a0ab1d6f1 (patch)
tree      62e03aa78d40e7b47157b03b4d5c6e2e7bb00cd5 /arch
parent    dd3ad97c5621aa853843dd5e6783ca787466158c (diff)
ARM: OMAP4: CPUidle: add synchronization for coupled idle states
With coupled idle states, a failure for any CPU to hit a low power
state must be coordinated such that all CPUs abort.

On OMAP4, when entering a coupled state, CPU0 has to wait for CPU1 to
enter its low power state before it can enter its own low power state.
This is implemented by letting CPU0 wait for the CPU1 powerdomain to
hit off. However, there are conditions where CPU1 might abort/fail and
not hit off while CPU0 is waiting for it: for example, a CPU1 wakeup
or a failed attempt to hit off due to hardware conditions.

To avoid the deadlock where CPU0 would continually wait for CPU1 to
hit off-mode, this patch adds a flag to signal when each CPU has come
out of its low-power state. CPU0 then checks whether CPU1 has hit off
*or* has already completed its attempt to hit off. If the latter, CPU0
must abort its attempt to hit a low-power state so the coupled state
enter method can return.

In addition, cpuidle_coupled_parallel_barrier() is used to ensure the
clearing of the 'done' flag is synchronized on all CPUs.

Signed-off-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Kevin Hilman <khilman@ti.com>
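A minimal user-space sketch of the coordination pattern described
above (not the kernel code itself, which follows in the diff below):
two pthreads stand in for CPU0/CPU1, pthread_barrier_wait() plays the
role of cpuidle_coupled_parallel_barrier(), and try_enter_off() and
pd_state[] are hypothetical stand-ins for omap4_enter_lowpower() and
the CPU1 powerdomain state that CPU0 polls. The sketch deterministically
exercises the abort path: CPU1's off attempt fails, so CPU0 must notice
cpu_done[1] instead of spinning forever waiting for off.

    /* Model of the coupled-idle abort protocol -- NOT kernel code. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum { PWR_ON, PWR_OFF };

    static _Atomic int pd_state[2] = { PWR_ON, PWR_ON };
    static atomic_bool cpu_done[2];
    static pthread_barrier_t abort_barrier;

    /* Hypothetical low-power attempt; here CPU1 always fails (as if a
     * wakeup were pending), so its powerdomain never reaches off. */
    static bool try_enter_off(int cpu)
    {
        if (cpu == 1)
            return false;
        atomic_store(&pd_state[cpu], PWR_OFF);
        return true;
    }

    static void *enter_idle_coupled(void *arg)
    {
        int cpu = (int)(long)arg;

        if (cpu == 0) {
            /* CPU0 waits for the CPU1 powerdomain to hit off, but must
             * bail out once CPU1 signals that its attempt is over --
             * otherwise this loop would spin forever. */
            while (atomic_load(&pd_state[1]) != PWR_OFF) {
                if (atomic_load(&cpu_done[1])) {
                    printf("cpu0: cpu1 done but not off, aborting\n");
                    goto fail;
                }
            }
        }

        try_enter_off(cpu);
        atomic_store(&cpu_done[cpu], true);

    fail:
        /* All CPUs rendezvous here so the done flags are cleared
         * together, mirroring cpuidle_coupled_parallel_barrier(). */
        pthread_barrier_wait(&abort_barrier);
        atomic_store(&cpu_done[cpu], false);
        atomic_store(&pd_state[cpu], PWR_ON);  /* model the wakeup */
        return NULL;
    }

    int main(void)
    {
        pthread_t t[2];

        pthread_barrier_init(&abort_barrier, NULL, 2);
        for (long i = 0; i < 2; i++)
            pthread_create(&t[i], NULL, enter_idle_coupled, (void *)i);
        for (int i = 0; i < 2; i++)
            pthread_join(t[i], NULL);
        pthread_barrier_destroy(&abort_barrier);
        return 0;
    }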
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mach-omap2/cpuidle44xx.c | 22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 25655eb69408..eb93e45d3271 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -53,6 +53,9 @@ static struct omap4_idle_statedata omap4_idle_data[] = {
 static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
 static struct clockdomain *cpu_clkdm[NR_CPUS];
 
+static atomic_t abort_barrier;
+static bool cpu_done[NR_CPUS];
+
 /**
  * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions
  * @dev: cpuidle device
@@ -90,8 +93,20 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
          * out of coherency and in OFF mode.
          */
         if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
-                while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF)
+                while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
                         cpu_relax();
+
+                        /*
+                         * CPU1 could have already entered & exited idle
+                         * without hitting off because of a wakeup
+                         * or a failed attempt to hit off mode. Check for
+                         * that here, otherwise we could spin forever
+                         * waiting for CPU1 off.
+                         */
+                        if (cpu_done[1])
+                                goto fail;
+
+                }
         }
 
         clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
@@ -116,6 +131,7 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
         }
 
         omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+        cpu_done[dev->cpu] = true;
 
         /* Wakeup CPU1 only if it is not offlined */
         if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
@@ -138,6 +154,10 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
 
         clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
 
+fail:
+        cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
+        cpu_done[dev->cpu] = false;
+
         local_fiq_enable();
 
         return index;
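A note on the design: the fail label is reached both by CPUs that
completed omap4_enter_lowpower() and by CPU0 when it aborts early, so
every participating CPU arrives at cpuidle_coupled_parallel_barrier().
Clearing cpu_done only after that rendezvous means no CPU can re-enter
the coupled state and observe a stale done flag from the previous idle
cycle, which would otherwise trigger a spurious abort.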