author     Thomas Gleixner <tglx@linutronix.de>  2013-07-01 16:14:10 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2013-07-25 17:07:29 -0400
commit     1c0d08e652c18e3f3198969435fef31941b2eec3 (patch)
tree       47f205d38ddcc6a2a9e6d2f0b1cd41bfbf87b448
parent     2dc04d3333049098691eb652e06d52fbd80771d2 (diff)
tick: Sanitize broadcast control logic
commit 07bd1172902e782f288e4d44b1fde7dec0f08b6f upstream.
The recent implementation of a generic dummy timer resulted in a
different registration order of per cpu local timers which made the
broadcast control logic go belly up.
If the dummy timer is the first clock event device which is registered
for a CPU, then it is installed, the broadcast timer is initialized,
and the CPU is marked as a broadcast target.
If a real clock event device is installed after that, we can fail to
take the CPU out of the broadcast mask. In the worst case we end up
with two periodic timer events firing for the same CPU: one from the
per cpu hardware device and one from the broadcast.
Now the problem is that we have no way to distinguish whether the
system is in a state which makes broadcasting necessary or whether the
broadcast bit was set due to the installation of the nonfunctional
dummy timer.
To solve this we need to keep track of the system state separately and
provide more detailed decision logic for whether we keep the CPU in
broadcast mode or not.
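Concretely, the separate state lands in a new cpumask,
tick_broadcast_on, maintained in the diff below. A minimal sketch of
the bookkeeping (locking elided):

	/* CPUs for which broadcast was explicitly switched on. */
	static cpumask_var_t tick_broadcast_on;

	/* On CLOCK_EVT_NOTIFY_BROADCAST_ON / _FORCE: remember the request. */
	cpumask_set_cpu(cpu, tick_broadcast_on);

	/* On CLOCK_EVT_NOTIFY_BROADCAST_OFF and in tick_shutdown_broadcast(). */
	cpumask_clear_cpu(cpu, tick_broadcast_on);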
The old decision logic only clears the broadcast mode if the newly
installed clock event device is not affected by power states.
The new logic clears the broadcast mode if one of the following is
true (a condensed sketch follows the list):
- The new device is not affected by power states.
- The system is not in a power state affected mode.
- The system has switched to oneshot mode. The oneshot broadcast is
controlled from the deep idle state. The CPU is not in idle at
this point, so it's safe to remove it from the mask.
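Condensed into a single predicate, the decision reads roughly as
below. keep_cpu_in_broadcast() is a hypothetical helper used for
illustration only; the real logic is open-coded in
tick_device_uses_broadcast() in the diff.

	static bool keep_cpu_in_broadcast(struct clock_event_device *dev, int cpu)
	{
		/* Device keeps ticking in deep power states: no broadcast needed. */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			return false;
		/* Broadcast was never explicitly switched on for this CPU. */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			return false;
		/* Oneshot mode: the deep idle code re-arms broadcast as needed. */
		if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
			return false;
		return true;
	}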
If we clear the broadcast bit for the CPU when a new device is
installed, we also shut down the broadcast device when this was the
last CPU in the broadcast mask.
If the broadcast bit is kept, then we leave the new device in shutdown
state and rely on the broadcast to deliver the timer interrupts via
the broadcast IPIs.
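Seen from the caller, that decision is the return value of
tick_device_uses_broadcast(); the tick-common.c hunk below updates the
comment accordingly. The contract amounts to:

	/* In tick_setup_device(): a nonzero return means the CPU stays in
	 * the broadcast mask, so leave the new per cpu device shut down
	 * and let the broadcast IPIs deliver the periodic tick. */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;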
Reported-and-tested-by: Stehle Vincent-B46079 <B46079@freescale.com>
Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Link: http://lkml.kernel.org/r/alpine.DEB.2.02.1307012153060.4013@ionos.tec.linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  kernel/time/tick-broadcast.c | 70
-rw-r--r--  kernel/time/tick-common.c    |  3
2 files changed, 61 insertions(+), 12 deletions(-)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 20d6fba70652..c389f068aca2 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -29,6 +29,7 @@
 
 static struct tick_device tick_broadcast_device;
 static cpumask_var_t tick_broadcast_mask;
+static cpumask_var_t tick_broadcast_on;
 static cpumask_var_t tmpmask;
 static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
 static int tick_broadcast_force;
@@ -123,8 +124,9 @@ static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
  */
 int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 {
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
@@ -138,20 +140,59 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 		dev->event_handler = tick_handle_periodic;
 		tick_device_setup_broadcast_func(dev);
 		cpumask_set_cpu(cpu, tick_broadcast_mask);
-		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
+		tick_broadcast_start_periodic(bc);
 		ret = 1;
 	} else {
 		/*
-		 * When the new device is not affected by the stop
-		 * feature and the cpu is marked in the broadcast mask
-		 * then clear the broadcast bit.
+		 * Clear the broadcast bit for this cpu if the
+		 * device is not power state affected.
 		 */
-		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
-			int cpu = smp_processor_id();
+		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
 			cpumask_clear_cpu(cpu, tick_broadcast_mask);
-			tick_broadcast_clear_oneshot(cpu);
-		} else {
+		else
 			tick_device_setup_broadcast_func(dev);
+
+		/*
+		 * Clear the broadcast bit if the CPU is not in
+		 * periodic broadcast on state.
+		 */
+		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
+			cpumask_clear_cpu(cpu, tick_broadcast_mask);
+
+		switch (tick_broadcast_device.mode) {
+		case TICKDEV_MODE_ONESHOT:
+			/*
+			 * If the system is in oneshot mode we can
+			 * unconditionally clear the oneshot mask bit,
+			 * because the CPU is running and therefore
+			 * not in an idle state which causes the power
+			 * state affected device to stop. Let the
+			 * caller initialize the device.
+			 */
+			tick_broadcast_clear_oneshot(cpu);
+			ret = 0;
+			break;
+
+		case TICKDEV_MODE_PERIODIC:
+			/*
+			 * If the system is in periodic mode, check
+			 * whether the broadcast device can be
+			 * switched off now.
+			 */
+			if (cpumask_empty(tick_broadcast_mask) && bc)
+				clockevents_shutdown(bc);
+			/*
+			 * If we kept the cpu in the broadcast mask,
+			 * tell the caller to leave the per cpu device
+			 * in shutdown state. The periodic interrupt
+			 * is delivered by the broadcast device.
+			 */
+			ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
+			break;
+		default:
+			/* Nothing to do */
+			ret = 0;
+			break;
 		}
 	}
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
@@ -281,6 +322,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
 	switch (*reason) {
 	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
 	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
+		cpumask_set_cpu(cpu, tick_broadcast_on);
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
 			if (tick_broadcast_device.mode ==
 			    TICKDEV_MODE_PERIODIC)
@@ -290,8 +332,12 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
 			tick_broadcast_force = 1;
 		break;
 	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
-		if (!tick_broadcast_force &&
-		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
+		if (tick_broadcast_force)
+			break;
+		cpumask_clear_cpu(cpu, tick_broadcast_on);
+		if (!tick_device_is_functional(dev))
+			break;
+		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
 			if (tick_broadcast_device.mode ==
 			    TICKDEV_MODE_PERIODIC)
 				tick_setup_periodic(dev, 0);
@@ -349,6 +395,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
 
 	bc = tick_broadcast_device.evtdev;
 	cpumask_clear_cpu(cpu, tick_broadcast_mask);
+	cpumask_clear_cpu(cpu, tick_broadcast_on);
 
 	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
 		if (bc && cpumask_empty(tick_broadcast_mask))
@@ -792,6 +839,7 @@ bool tick_broadcast_oneshot_available(void)
 void __init tick_broadcast_init(void)
 {
 	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
 	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
 #ifdef CONFIG_TICK_ONESHOT
 	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 5d3fb100bc06..7ce5e5a4a4c5 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -194,7 +194,8 @@ static void tick_setup_device(struct tick_device *td,
 	 * When global broadcasting is active, check if the current
 	 * device is registered as a placeholder for broadcast mode.
 	 * This allows us to handle this x86 misfeature in a generic
-	 * way.
+	 * way. This function also returns !=0 when we keep the
+	 * current active broadcast state for this CPU.
 	 */
 	if (tick_device_uses_broadcast(newdev, cpu))
 		return;