author		Frederic Weisbecker <frederic@kernel.org>	2018-02-20 23:17:28 -0500
committer	Ingo Molnar <mingo@kernel.org>	2018-02-21 03:49:09 -0500
commit		dcdedb24159be3487e3dbbe1faa79ae7d00c92ac (patch)
tree		166945628379cae5ed1e47eef9b5ae1333de79d5
parent		d84b31313ef8a8de55a2cbfb72f76f36d8c927fb (diff)
sched/nohz: Remove the 1 Hz tick code
Now that the 1Hz tick is offloaded to workqueues, we can safely remove the
residual code that used to handle it locally.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Wanpeng Li <kernellwp@gmail.com>
Link: http://lkml.kernel.org/r/1519186649-3242-7-git-send-email-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
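For context, the offload this message refers to landed in the parent commit (d84b31313ef8) of this series: a per-CPU delayed work re-arms itself every HZ jiffies and runs the scheduler tick on behalf of a busy nohz_full CPU. A condensed, paraphrased sketch of that mechanism follows (not a verbatim quote; the upstream version carries additional checks and comments):

struct tick_work {
	int			cpu;
	struct delayed_work	work;
};

static struct tick_work __percpu *tick_work_cpu;

static void sched_tick_remote(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tick_work *twork = container_of(dwork, struct tick_work, work);
	int cpu = twork->cpu;
	struct rq *rq = cpu_rq(cpu);

	/* Run the task tick on behalf of the remote CPU while it is busy. */
	if (!idle_cpu(cpu)) {
		struct rq_flags rf;
		struct task_struct *curr;

		rq_lock_irq(rq, &rf);
		update_rq_clock(rq);
		curr = rq->curr;
		curr->sched_class->task_tick(rq, curr, 0);
		rq_unlock_irq(rq, &rf);
	}

	/* Re-arm: one residual tick per second (1Hz) is enough. */
	queue_delayed_work(system_unbound_wq, dwork, HZ);
}

Because an unbound workqueue does this once per second, the tick path itself no longer needs to force a local 1Hz floor, which is exactly what this commit deletes.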
 include/linux/sched/nohz.h |  4 ----
 kernel/sched/core.c        | 29 -----------------------------
 kernel/sched/idle_task.c   |  1 -
 kernel/sched/sched.h       | 11 +----------
 kernel/time/tick-sched.c   |  6 ------
 5 files changed, 1 insertion(+), 50 deletions(-)
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 3d3a97d9399d..094217273ff9 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -37,8 +37,4 @@ extern void wake_up_nohz_cpu(int cpu);
 static inline void wake_up_nohz_cpu(int cpu) { }
 #endif
 
-#ifdef CONFIG_NO_HZ_FULL
-extern u64 scheduler_tick_max_deferment(void);
-#endif
-
 #endif /* _LINUX_SCHED_NOHZ_H */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5dfef458ab52..8fff4f16c510 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3096,35 +3096,9 @@ void scheduler_tick(void)
 	rq->idle_balance = idle_cpu(cpu);
 	trigger_load_balance(rq);
 #endif
-	rq_last_tick_reset(rq);
 }
 
 #ifdef CONFIG_NO_HZ_FULL
-/**
- * scheduler_tick_max_deferment
- *
- * Keep at least one tick per second when a single
- * active task is running because the scheduler doesn't
- * yet completely support full dynticks environment.
- *
- * This makes sure that uptime, CFS vruntime, load
- * balancing, etc... continue to move forward, even
- * with a very low granularity.
- *
- * Return: Maximum deferment in nanoseconds.
- */
-u64 scheduler_tick_max_deferment(void)
-{
-	struct rq *rq = this_rq();
-	unsigned long next, now = READ_ONCE(jiffies);
-
-	next = rq->last_sched_tick + HZ;
-
-	if (time_before_eq(next, now))
-		return 0;
-
-	return jiffies_to_nsecs(next - now);
-}
 
 struct tick_work {
 	int			cpu;
@@ -6116,9 +6090,6 @@ void __init sched_init(void)
 		rq->last_load_update_tick = jiffies;
 		rq->nohz_flags = 0;
 #endif
-#ifdef CONFIG_NO_HZ_FULL
-		rq->last_sched_tick = 0;
-#endif
 #endif /* CONFIG_SMP */
 		hrtick_rq_init(rq);
 		atomic_set(&rq->nr_iowait, 0);
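For reference, the scheduler_tick_max_deferment() logic removed above capped tick deferment at one second (HZ jiffies) past the last scheduler tick. A minimal user-space analogue of that arithmetic, with HZ=1000 assumed and the kernel's time_before_eq()/jiffies_to_nsecs() helpers modeled as plain arithmetic:

#include <stdio.h>

#define HZ		1000			/* assumed: 1000 jiffies per second */
#define NSEC_PER_JIFFY	(1000000000UL / HZ)

/* Model of the removed deferment math: how long may the tick stay off? */
static unsigned long max_deferment(unsigned long last_sched_tick,
				   unsigned long now)
{
	unsigned long next = last_sched_tick + HZ;

	/* time_before_eq(next, now): the 1Hz deadline has already passed */
	if ((long)(next - now) <= 0)
		return 0;

	return (next - now) * NSEC_PER_JIFFY;	/* jiffies_to_nsecs() */
}

int main(void)
{
	/* Last tick 250 jiffies ago: may defer another 750 ms. */
	printf("%lu ns\n", max_deferment(1000, 1250));
	/* Last tick 1500 jiffies ago: a tick is overdue, no deferment. */
	printf("%lu ns\n", max_deferment(1000, 2500));
	return 0;
}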
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index e1b46e08c8e1..48b8a83f5185 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -48,7 +48,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
-	rq_last_tick_reset(rq);
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c1c7c788da1c..dc6c8b5a24ad 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -727,9 +727,7 @@ struct rq {
 #endif /* CONFIG_SMP */
 	unsigned long nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
-#ifdef CONFIG_NO_HZ_FULL
-	unsigned long last_sched_tick;
-#endif
+
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -1626,13 +1624,6 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
 	sched_update_tick_dependency(rq);
 }
 
-static inline void rq_last_tick_reset(struct rq *rq)
-{
-#ifdef CONFIG_NO_HZ_FULL
-	rq->last_sched_tick = jiffies;
-#endif
-}
-
 extern void update_rq_clock(struct rq *rq);
 
 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d479b21a848b..f2fa2e940fe5 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -748,12 +748,6 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		delta = KTIME_MAX;
 	}
 
-#ifdef CONFIG_NO_HZ_FULL
-	/* Limit the tick delta to the maximum scheduler deferment */
-	if (!ts->inidle)
-		delta = min(delta, scheduler_tick_max_deferment());
-#endif
-
 	/* Calculate the next expiry time */
 	if (delta < (KTIME_MAX - basemono))
 		expires = basemono + delta;
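One consequence worth noting in the hunk above: on a busy nohz_full CPU, delta was previously clamped to scheduler_tick_max_deferment(), so the computed expiry was never more than roughly a second away. With the clamp gone, delta may be KTIME_MAX and the tick can stop indefinitely, the residual 1Hz work being done by the workqueue instead. The overflow guard on the remaining line is what keeps the "never expires" case safe; a standalone sketch of that pattern (types and names assumed, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX INT64_MAX	/* assumed stand-in for the kernel's KTIME_MAX */

/* Clamp base + delta to KTIME_MAX so the addition cannot overflow. */
static int64_t next_expiry(uint64_t basemono, uint64_t delta)
{
	if (delta < (KTIME_MAX - basemono))
		return basemono + delta;
	return KTIME_MAX;	/* "never": the tick stays stopped */
}

int main(void)
{
	printf("%lld\n", (long long)next_expiry(1000000, 500000));	/* 1500000 */
	printf("%lld\n", (long long)next_expiry(1000000, KTIME_MAX));	/* KTIME_MAX */
	return 0;
}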