diff options
| author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2015-04-29 09:19:21 -0400 |
|---|---|---|
| committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2015-04-29 09:19:21 -0400 |
| commit | df8d9eeadd0f7a216f2476351d5aee43c6550bf0 (patch) | |
| tree | 5290522027dd4acd6912c69446ad181c2aff4324 | |
| parent | b787f68c36d49bb1d9236f403813641efa74a031 (diff) | |
cpuidle: Run tick_broadcast_exit() with disabled interrupts
Commit 335f49196fd6 (sched/idle: Use explicit broadcast oneshot
control function) replaced clockevents_notify() invocations in
cpuidle_idle_call() with direct calls to tick_broadcast_enter()
and tick_broadcast_exit(), but it overlooked the fact that
interrupts were already enabled before calling the latter which
led to functional breakage on systems using idle states with the
CPUIDLE_FLAG_TIMER_STOP flag set.
Fix that by moving the invocations of tick_broadcast_enter()
and tick_broadcast_exit() down into cpuidle_enter_state() where
interrupts are still disabled when tick_broadcast_exit() is
called. Also ensure that interrupts will be disabled before
running tick_broadcast_exit() even if they have been enabled by
the idle state's ->enter callback. Trigger a WARN_ON_ONCE() in
that case, as we generally don't want that to happen for states
with CPUIDLE_FLAG_TIMER_STOP set.
Fixes: 335f49196fd6 (sched/idle: Use explicit broadcast oneshot control function)
Reported-and-tested-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Reported-and-tested-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
| -rw-r--r-- | drivers/cpuidle/cpuidle.c | 16 | ||||
| -rw-r--r-- | kernel/sched/idle.c | 16 |
2 files changed, 18 insertions, 14 deletions
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7a73a279e179..61c417b9e53f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
| @@ -158,9 +158,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
| 158 | int entered_state; | 158 | int entered_state; |
| 159 | 159 | ||
| 160 | struct cpuidle_state *target_state = &drv->states[index]; | 160 | struct cpuidle_state *target_state = &drv->states[index]; |
| 161 | bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP); | ||
| 161 | ktime_t time_start, time_end; | 162 | ktime_t time_start, time_end; |
| 162 | s64 diff; | 163 | s64 diff; |
| 163 | 164 | ||
| 165 | /* | ||
| 166 | * Tell the time framework to switch to a broadcast timer because our | ||
| 167 | * local timer will be shut down. If a local timer is used from another | ||
| 168 | * CPU as a broadcast timer, this call may fail if it is not available. | ||
| 169 | */ | ||
| 170 | if (broadcast && tick_broadcast_enter()) | ||
| 171 | return -EBUSY; | ||
| 172 | |||
| 164 | trace_cpu_idle_rcuidle(index, dev->cpu); | 173 | trace_cpu_idle_rcuidle(index, dev->cpu); |
| 165 | time_start = ktime_get(); | 174 | time_start = ktime_get(); |
| 166 | 175 | ||
| @@ -169,6 +178,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
| 169 | time_end = ktime_get(); | 178 | time_end = ktime_get(); |
| 170 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); | 179 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); |
| 171 | 180 | ||
| 181 | if (broadcast) { | ||
| 182 | if (WARN_ON_ONCE(!irqs_disabled())) | ||
| 183 | local_irq_disable(); | ||
| 184 | |||
| 185 | tick_broadcast_exit(); | ||
| 186 | } | ||
| 187 | |||
| 172 | if (!cpuidle_state_is_coupled(dev, drv, entered_state)) | 188 | if (!cpuidle_state_is_coupled(dev, drv, entered_state)) |
| 173 | local_irq_enable(); | 189 | local_irq_enable(); |
| 174 | 190 | ||
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index deef1caa94c6..fefcb1fa5160 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
| @@ -81,7 +81,6 @@ static void cpuidle_idle_call(void) | |||
| 81 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 81 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
| 82 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | 82 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
| 83 | int next_state, entered_state; | 83 | int next_state, entered_state; |
| 84 | unsigned int broadcast; | ||
| 85 | bool reflect; | 84 | bool reflect; |
| 86 | 85 | ||
| 87 | /* | 86 | /* |
| @@ -150,17 +149,6 @@ static void cpuidle_idle_call(void) | |||
| 150 | goto exit_idle; | 149 | goto exit_idle; |
| 151 | } | 150 | } |
| 152 | 151 | ||
| 153 | broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP; | ||
| 154 | |||
| 155 | /* | ||
| 156 | * Tell the time framework to switch to a broadcast timer | ||
| 157 | * because our local timer will be shutdown. If a local timer | ||
| 158 | * is used from another cpu as a broadcast timer, this call may | ||
| 159 | * fail if it is not available | ||
| 160 | */ | ||
| 161 | if (broadcast && tick_broadcast_enter()) | ||
| 162 | goto use_default; | ||
| 163 | |||
| 164 | /* Take note of the planned idle state. */ | 152 | /* Take note of the planned idle state. */ |
| 165 | idle_set_state(this_rq(), &drv->states[next_state]); | 153 | idle_set_state(this_rq(), &drv->states[next_state]); |
| 166 | 154 | ||
| @@ -174,8 +162,8 @@ static void cpuidle_idle_call(void) | |||
| 174 | /* The cpu is no longer idle or about to enter idle. */ | 162 | /* The cpu is no longer idle or about to enter idle. */ |
| 175 | idle_set_state(this_rq(), NULL); | 163 | idle_set_state(this_rq(), NULL); |
| 176 | 164 | ||
| 177 | if (broadcast) | 165 | if (entered_state == -EBUSY) |
| 178 | tick_broadcast_exit(); | 166 | goto use_default; |
| 179 | 167 | ||
| 180 | /* | 168 | /* |
| 181 | * Give the governor an opportunity to reflect on the outcome | 169 | * Give the governor an opportunity to reflect on the outcome |
