aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2015-05-27 13:22:08 -0400
committerFrederic Weisbecker <fweisbec@gmail.com>2015-07-29 09:45:00 -0400
commit73738a95d00467812664b7f86ba3052f5faf96d7 (patch)
tree9827c57a6391c154fa87e4d103672e5888c7fa71
parent594493594373862ed2a7f91d88a5a2670742faa6 (diff)
nohz: Restart nohz full tick from irq exit
Restart the tick when necessary from the irq exit path. It makes nohz full more flexible, simplifies the related IPIs and doesn't bring significant overhead on irq exit. In a longer term view, it will allow us to piggyback the nohz kick on the scheduler IPI in the future instead of sending a dedicated IPI that often doubles the scheduler IPI on task wakeup. This will require more changes, though, including a careful review of resched_curr() callers to include nohz full needs. Reviewed-by: Rik van Riel <riel@redhat.com> Cc: Christoph Lameter <cl@linux.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com> Cc: Rik van Riel <riel@redhat.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Viresh Kumar <viresh.kumar@linaro.org> Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
-rw-r--r--include/linux/tick.h8
-rw-r--r--kernel/time/tick-sched.c34
2 files changed, 10 insertions, 32 deletions
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 1ca93f2de6f5..7d35b0fec399 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -147,7 +147,6 @@ static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
147 cpumask_or(mask, mask, tick_nohz_full_mask); 147 cpumask_or(mask, mask, tick_nohz_full_mask);
148} 148}
149 149
150extern void __tick_nohz_full_check(void);
151extern void tick_nohz_full_kick(void); 150extern void tick_nohz_full_kick(void);
152extern void tick_nohz_full_kick_cpu(int cpu); 151extern void tick_nohz_full_kick_cpu(int cpu);
153extern void tick_nohz_full_kick_all(void); 152extern void tick_nohz_full_kick_all(void);
@@ -156,7 +155,6 @@ extern void __tick_nohz_task_switch(struct task_struct *tsk);
156static inline bool tick_nohz_full_enabled(void) { return false; } 155static inline bool tick_nohz_full_enabled(void) { return false; }
157static inline bool tick_nohz_full_cpu(int cpu) { return false; } 156static inline bool tick_nohz_full_cpu(int cpu) { return false; }
158static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { } 157static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
159static inline void __tick_nohz_full_check(void) { }
160static inline void tick_nohz_full_kick_cpu(int cpu) { } 158static inline void tick_nohz_full_kick_cpu(int cpu) { }
161static inline void tick_nohz_full_kick(void) { } 159static inline void tick_nohz_full_kick(void) { }
162static inline void tick_nohz_full_kick_all(void) { } 160static inline void tick_nohz_full_kick_all(void) { }
@@ -190,12 +188,6 @@ static inline void housekeeping_affine(struct task_struct *t)
190#endif 188#endif
191} 189}
192 190
193static inline void tick_nohz_full_check(void)
194{
195 if (tick_nohz_full_enabled())
196 __tick_nohz_full_check();
197}
198
199static inline void tick_nohz_task_switch(struct task_struct *tsk) 191static inline void tick_nohz_task_switch(struct task_struct *tsk)
200{ 192{
201 if (tick_nohz_full_enabled()) 193 if (tick_nohz_full_enabled())
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d6c8eff6e7b4..a06cd4af0ff1 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -197,25 +197,9 @@ static bool can_stop_full_tick(void)
197 return true; 197 return true;
198} 198}
199 199
200static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
201
202/*
203 * Re-evaluate the need for the tick on the current CPU
204 * and restart it if necessary.
205 */
206void __tick_nohz_full_check(void)
207{
208 struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
209
210 if (tick_nohz_full_cpu(smp_processor_id())) {
211 if (ts->tick_stopped && !can_stop_full_tick())
212 tick_nohz_restart_sched_tick(ts, ktime_get());
213 }
214}
215
216static void nohz_full_kick_work_func(struct irq_work *work) 200static void nohz_full_kick_work_func(struct irq_work *work)
217{ 201{
218 __tick_nohz_full_check(); 202 /* Empty, the tick restart happens on tick_nohz_irq_exit() */
219} 203}
220 204
221static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { 205static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
@@ -250,7 +234,7 @@ void tick_nohz_full_kick_cpu(int cpu)
250 234
251static void nohz_full_kick_ipi(void *info) 235static void nohz_full_kick_ipi(void *info)
252{ 236{
253 __tick_nohz_full_check(); 237 /* Empty, the tick restart happens on tick_nohz_irq_exit() */
254} 238}
255 239
256/* 240/*
@@ -703,7 +687,9 @@ out:
703 return tick; 687 return tick;
704} 688}
705 689
706static void tick_nohz_full_stop_tick(struct tick_sched *ts) 690static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
691
692static void tick_nohz_full_update_tick(struct tick_sched *ts)
707{ 693{
708#ifdef CONFIG_NO_HZ_FULL 694#ifdef CONFIG_NO_HZ_FULL
709 int cpu = smp_processor_id(); 695 int cpu = smp_processor_id();
@@ -714,10 +700,10 @@ static void tick_nohz_full_stop_tick(struct tick_sched *ts)
714 if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE) 700 if (!ts->tick_stopped && ts->nohz_mode == NOHZ_MODE_INACTIVE)
715 return; 701 return;
716 702
717 if (!can_stop_full_tick()) 703 if (can_stop_full_tick())
718 return; 704 tick_nohz_stop_sched_tick(ts, ktime_get(), cpu);
719 705 else if (ts->tick_stopped)
720 tick_nohz_stop_sched_tick(ts, ktime_get(), cpu); 706 tick_nohz_restart_sched_tick(ts, ktime_get());
721#endif 707#endif
722} 708}
723 709
@@ -847,7 +833,7 @@ void tick_nohz_irq_exit(void)
847 if (ts->inidle) 833 if (ts->inidle)
848 __tick_nohz_idle_enter(ts); 834 __tick_nohz_idle_enter(ts);
849 else 835 else
850 tick_nohz_full_stop_tick(ts); 836 tick_nohz_full_update_tick(ts);
851} 837}
852 838
853/** 839/**