author    Frederic Weisbecker <fweisbec@gmail.com>    2015-07-22 11:03:52 -0400
committer Frederic Weisbecker <fweisbec@gmail.com>    2016-03-02 10:44:57 -0500
commit    4f49b90abb4aca6fe677c95fc352fd0674d489bd (patch)
tree      5f553618d1fb296be3b08fd41b3a87dfbbc08e72
parent    b78783000d5cb7c5994e6742e1d1ce594bfea15b (diff)
sched-clock: Migrate to use new tick dependency mask model
Instead of having the nohz subsystem check sched_clock_stable() to verify
its tick dependency, migrate sched-clock to the new dependency mask so that
it is included in the all-in-one check.

Reviewed-by: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
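[Editor's note: the snippet below is a minimal sketch, not part of this patch, of how a subsystem drives the tick dependency mask model this commit migrates to: set a bit while the tick must keep running, clear it once the dependency goes away, and let the nohz core fold every bit into a single check in can_stop_full_tick(). Only tick_dep_set()/tick_dep_clear() and TICK_DEP_BIT_CLOCK_UNSTABLE are taken from <linux/tick.h> as used by this patch; the example_* helper names are hypothetical.]

/*
 * Hypothetical illustration only -- not part of this patch.
 * Usage pattern of the global tick dependency mask from <linux/tick.h>.
 */
#include <linux/tick.h>

/* Hypothetical helper: the tick is now required on full dynticks CPUs. */
static void example_clock_became_unstable(void)
{
	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
}

/* Hypothetical helper: the dependency is gone, the tick may be stopped again. */
static void example_clock_became_stable(void)
{
	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
}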
-rw-r--r--  kernel/sched/clock.c     |  5 +++++
-rw-r--r--  kernel/time/tick-sched.c | 19 -------------------
2 files changed, 5 insertions(+), 19 deletions(-)
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index bc54e84675da..fedb967a9841 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -61,6 +61,7 @@
 #include <linux/static_key.h>
 #include <linux/workqueue.h>
 #include <linux/compiler.h>
+#include <linux/tick.h>
 
 /*
  * Scheduler clock - returns current time in nanosec units.
@@ -89,6 +90,8 @@ static void __set_sched_clock_stable(void)
 {
 	if (!sched_clock_stable())
 		static_key_slow_inc(&__sched_clock_stable);
+
+	tick_dep_clear(TICK_DEP_BIT_CLOCK_UNSTABLE);
 }
 
 void set_sched_clock_stable(void)
@@ -108,6 +111,8 @@ static void __clear_sched_clock_stable(struct work_struct *work)
 	/* XXX worry about clock continuity */
 	if (sched_clock_stable())
 		static_key_slow_dec(&__sched_clock_stable);
+
+	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
 }
 
 static DECLARE_WORK(sched_clock_work, __clear_sched_clock_stable);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e4f2916e66a9..969e6704c3c9 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -204,25 +204,6 @@ static bool can_stop_full_tick(struct tick_sched *ts)
 		return false;
 	}
 
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-	/*
-	 * sched_clock_tick() needs us?
-	 *
-	 * TODO: kick full dynticks CPUs when
-	 * sched_clock_stable is set.
-	 */
-	if (!sched_clock_stable()) {
-		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
-		/*
-		 * Don't allow the user to think they can get
-		 * full NO_HZ with this machine.
-		 */
-		WARN_ONCE(tick_nohz_full_running,
-			  "NO_HZ FULL will not work with unstable sched clock");
-		return false;
-	}
-#endif
-
 	return true;
 }
 