author | Peter Zijlstra <peterz@infradead.org> | 2017-03-13 08:46:21 -0400
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2017-03-23 02:31:48 -0400
commit | 71fdb70eb48784c1f28cdf2e67c4c587dd7f2594 |
tree | 4df2510a131df21ac31d649fd792b5acae47c78b |
parent | 093b995e3b55a0ae0670226ddfcb05bfbf0099ae |
sched/clock: Fix clear_sched_clock_stable() preempt wobbly
Paul reported a problem with clear_sched_clock_stable(). Since we run
all of __clear_sched_clock_stable() from workqueue context, there's a
preempt problem.

Solve it by running only the static_key_disable() from the workqueue.
Reported-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: fweisbec@gmail.com
Link: http://lkml.kernel.org/r/20170313124621.GA3328@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | kernel/sched/clock.c | 17
1 file changed, 12 insertions, 5 deletions
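For orientation, here is a condensed sketch of the code after the patch, reconstructed from the hunks below; the per-CPU scd/offset bookkeeping visible in the second hunk is elided. The static-key flip stays on the workqueue, presumably because static_branch_disable() may sleep, while the rest of __clear_sched_clock_stable() now runs in the caller's context:

static void __sched_clock_work(struct work_struct *work)
{
	/* static_branch_disable() may sleep, so it runs from the work item. */
	static_branch_disable(&__sched_clock_stable);
}

static DECLARE_WORK(sched_clock_work, __sched_clock_work);

static void __clear_sched_clock_stable(void)
{
	/* ... per-CPU scd/offset bookkeeping elided ... */
	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);

	/* Defer only the static-key disable to process context. */
	if (sched_clock_stable())
		schedule_work(&sched_clock_work);
}

clear_sched_clock_stable() then calls __clear_sched_clock_stable() directly instead of queueing the whole function, which is the split described in the commit message.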
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index a08795e21628..fec0f58c8dee 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -141,7 +141,14 @@ static void __set_sched_clock_stable(void)
 	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
 }
 
-static void __clear_sched_clock_stable(struct work_struct *work)
+static void __sched_clock_work(struct work_struct *work)
+{
+	static_branch_disable(&__sched_clock_stable);
+}
+
+static DECLARE_WORK(sched_clock_work, __sched_clock_work);
+
+static void __clear_sched_clock_stable(void)
 {
 	struct sched_clock_data *scd = this_scd();
 
@@ -160,11 +167,11 @@ static void __clear_sched_clock_stable(struct work_struct *work)
 		scd->tick_gtod, gtod_offset,
 		scd->tick_raw,  raw_offset);
 
-	static_branch_disable(&__sched_clock_stable);
 	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
-}
 
-static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
+	if (sched_clock_stable())
+		schedule_work(&sched_clock_work);
+}
 
 void clear_sched_clock_stable(void)
 {
@@ -173,7 +180,7 @@ void clear_sched_clock_stable(void)
 	smp_mb(); /* matches sched_clock_init_late() */
 
 	if (sched_clock_running == 2)
-		schedule_work(&sched_clock_work);
+		__clear_sched_clock_stable();
 }
 
 void sched_clock_init_late(void)