author		Frederic Weisbecker <fweisbec@gmail.com>	2013-04-17 18:15:40 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2013-04-18 12:53:34 -0400
commit		76c24fb054b52b34af4dcde589cbb9e2b98fc74c
tree		bbce968d4f057dce3b028ca5c5b894e68255db02 /kernel/time/tick-sched.c
parent		65d798f0f9339ae2c4ebe9480e3260b33382a584
nohz: New APIs to re-evaluate the tick on full dynticks CPUs
Provide two new helpers to notify the full dynticks CPUs about internal system changes that may require them to re-evaluate the state of their tick. Practical examples include posix cpu timers, the perf tick and the sched clock tick.

For now the notifying handler, triggered through IPIs, is a stub; it will be filled in when the tick stop/restart infrastructure lands.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
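
As a rough usage sketch (not part of this patch), the intended call pattern looks like the hypothetical snippet below. The example_* functions are made up for illustration, and the snippet assumes the two helpers end up declared in <linux/tick.h> alongside tick_nohz_full_cpu().

/*
 * Hypothetical callers, for illustration only.  Assumes the new helpers
 * are declared in <linux/tick.h>.
 */
#include <linux/preempt.h>
#include <linux/tick.h>

/*
 * A per-CPU tick dependency appeared (e.g. a posix cpu timer was armed
 * on this CPU): ask the local CPU to re-evaluate its tick.  This is a
 * no-op unless the CPU is full dynticks.
 */
static void example_local_tick_dependency_added(void)
{
        /*
         * tick_nohz_full_kick() reads smp_processor_id() and queues a
         * per-CPU irq_work, so call it from a non-preemptible context.
         */
        preempt_disable();
        tick_nohz_full_kick();
        preempt_enable();
}

/*
 * A system-wide change (e.g. the sched clock tick is needed everywhere):
 * IPI every full dynticks CPU so each one re-checks whether its tick
 * must be restarted.
 */
static void example_global_tick_dependency_added(void)
{
        tick_nohz_full_kick_all();
}

The split mirrors the implementation below: the local kick goes through irq_work so it can be requested from interrupt context, while the global kick uses smp_call_function_many() restricted to the CPUs in nohz_full_mask.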
Diffstat (limited to 'kernel/time/tick-sched.c')
-rw-r--r--	kernel/time/tick-sched.c	| 51
1 file changed, 51 insertions(+), 0 deletions(-)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 369b5769fc97..2bcad5b904d8 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -147,6 +147,57 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 static cpumask_var_t nohz_full_mask;
 bool have_nohz_full_mask;
 
+/*
+ * Re-evaluate the need for the tick on the current CPU
+ * and restart it if necessary.
+ */
+static void tick_nohz_full_check(void)
+{
+        /*
+         * STUB for now, will be filled with the full tick stop/restart
+         * infrastructure patches
+         */
+}
+
+static void nohz_full_kick_work_func(struct irq_work *work)
+{
+        tick_nohz_full_check();
+}
+
+static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
+        .func = nohz_full_kick_work_func,
+};
+
+/*
+ * Kick the current CPU if it's full dynticks in order to force it to
+ * re-evaluate its dependency on the tick and restart it if necessary.
+ */
+void tick_nohz_full_kick(void)
+{
+        if (tick_nohz_full_cpu(smp_processor_id()))
+                irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+}
+
+static void nohz_full_kick_ipi(void *info)
+{
+        tick_nohz_full_check();
+}
+
+/*
+ * Kick all full dynticks CPUs in order to force these to re-evaluate
+ * their dependency on the tick and restart it if necessary.
+ */
+void tick_nohz_full_kick_all(void)
+{
+        if (!have_nohz_full_mask)
+                return;
+
+        preempt_disable();
+        smp_call_function_many(nohz_full_mask,
+                               nohz_full_kick_ipi, NULL, false);
+        preempt_enable();
+}
+
 int tick_nohz_full_cpu(int cpu)
 {
         if (!have_nohz_full_mask)