author     Colin Cross <ccross@android.com>       2012-05-07 20:57:42 -0400
committer  Len Brown <len.brown@intel.com>        2012-06-02 00:49:36 -0400
commit     20ff51a36b2cd25ee7eb3216b6d02b68935435ba (patch)
tree       3c50651ca3cdc64c409afe9f867c13a8d50200e0 /drivers
parent     4126c0197bc8c58a0bb7fcda07b01b596b6fb4c5 (diff)
cpuidle: coupled: add parallel barrier function
Adds cpuidle_coupled_parallel_barrier, which can be used by coupled
cpuidle state enter functions to handle resynchronization after
determining if any cpu needs to abort. The normal use case will
be:
static bool abort_flag;
static atomic_t abort_barrier;

int arch_cpuidle_enter(struct cpuidle_device *dev, ...)
{
        if (arch_turn_off_irq_controller()) {
                /* returns an error if an irq is pending and would be lost
                   if idle continued and turned off power */
                abort_flag = true;
        }

        cpuidle_coupled_parallel_barrier(dev, &abort_barrier);

        if (abort_flag) {
                /* One of the cpus didn't turn off its irq controller */
                arch_turn_on_irq_controller();
                return -EINTR;
        }

        /* continue with idle */
        ...
}
This will cause all cpus to abort idle together if one of them needs
to abort.
Reviewed-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Reviewed-by: Kevin Hilman <khilman@ti.com>
Tested-by: Kevin Hilman <khilman@ti.com>
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: Len Brown <len.brown@intel.com>
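For context on where an enter function like the one in the example plugs in, below is a rough registration sketch, assuming the 3.5-era struct cpuidle_driver layout. It is not part of this patch: foo_idle_driver, arch_cpuidle_enter_wfi, and all latency numbers are hypothetical; only CPUIDLE_FLAG_COUPLED (introduced by this patch series) is the relevant piece, marking state 1 so the core routes it through the coupled-idle machinery.

#include <linux/cpuidle.h>
#include <linux/module.h>

static struct cpuidle_driver foo_idle_driver = {
        .name = "foo_idle",
        .owner = THIS_MODULE,
        .states[0] = {
                .enter = arch_cpuidle_enter_wfi,   /* hypothetical per-cpu WFI state */
                .exit_latency = 1,                 /* us, illustrative */
                .target_residency = 1,             /* us, illustrative */
                .flags = CPUIDLE_FLAG_TIME_VALID,
                .name = "WFI",
                .desc = "cpu clock gated",
        },
        .states[1] = {
                .enter = arch_cpuidle_enter,       /* the handler sketched above */
                .exit_latency = 300,               /* us, illustrative */
                .target_residency = 1000,          /* us, illustrative */
                .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
                .name = "C2",
                .desc = "coupled cluster power-down",
        },
        .state_count = 2,
};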
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/cpuidle/coupled.c  37
1 file changed, 37 insertions, 0 deletions
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index aab6bba8daec..2c9bf2692232 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -130,6 +130,43 @@ static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
 static cpumask_t cpuidle_coupled_poked_mask;
 
 /**
+ * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
+ * @dev: cpuidle_device of the calling cpu
+ * @a: atomic variable to hold the barrier
+ *
+ * No caller to this function will return from this function until all online
+ * cpus in the same coupled group have called this function.  Once any caller
+ * has returned from this function, the barrier is immediately available for
+ * reuse.
+ *
+ * The atomic variable a must be initialized to 0 before any cpu calls
+ * this function, will be reset to 0 before any cpu returns from this function.
+ *
+ * Must only be called from within a coupled idle state handler
+ * (state.enter when state.flags has CPUIDLE_FLAG_COUPLED set).
+ *
+ * Provides full smp barrier semantics before and after calling.
+ */
+void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+{
+        int n = dev->coupled->online_count;
+
+        smp_mb__before_atomic_inc();
+        atomic_inc(a);
+
+        while (atomic_read(a) < n)
+                cpu_relax();
+
+        if (atomic_inc_return(a) == n * 2) {
+                atomic_set(a, 0);
+                return;
+        }
+
+        while (atomic_read(a) > n)
+                cpu_relax();
+}
+
+/**
  * cpuidle_state_is_coupled - check if a state is part of a coupled set
  * @dev: struct cpuidle_device for the current cpu
  * @drv: struct cpuidle_driver for the platform
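The counter in cpuidle_coupled_parallel_barrier runs to n twice: once so every cpu observes that all have arrived, and once so the barrier can be reset without racing a cpu that re-enters it. To make that concrete, here is a minimal user-space analogue of the same two-phase counting barrier, assuming C11 atomics and pthreads; barrier_wait, worker, and N_THREADS are illustrative names, and seq-cst atomics stand in for the kernel's explicit smp_mb ordering.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define N_THREADS 4

static atomic_int barrier;  /* must start at 0; resets to 0 on each use */

static void barrier_wait(atomic_int *a, int n)
{
        /* Phase 1: count up to n, so every thread knows all have arrived. */
        atomic_fetch_add(a, 1);
        while (atomic_load(a) < n)
                ;  /* spin */

        /* Phase 2: count up to 2*n; the last arrival resets the counter.
         * atomic_fetch_add returns the old value, so old + 1 is the new
         * count, mirroring the kernel's atomic_inc_return().
         */
        if (atomic_fetch_add(a, 1) + 1 == n * 2) {
                atomic_store(a, 0);
                return;
        }
        while (atomic_load(a) > n)
                ;  /* spin until the last thread resets the counter */
}

static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 3; i++) {
                /* ... per-thread work for this round ... */
                barrier_wait(&barrier, N_THREADS);
        }
        return NULL;
}

int main(void)
{
        pthread_t threads[N_THREADS];

        for (int i = 0; i < N_THREADS; i++)
                pthread_create(&threads[i], NULL, worker, NULL);
        for (int i = 0; i < N_THREADS; i++)
                pthread_join(threads[i], NULL);
        puts("all threads passed the barrier 3 times");
        return 0;
}

The second counting phase is what makes the barrier immediately reusable: every thread returns only at or after the reset, so none can start a new round while a straggler from the previous round still sees a stale count. This matches the "immediately available for reuse" guarantee in the kerneldoc above.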