aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2016-04-19 11:36:51 -0400
committerIngo Molnar <mingo@kernel.org>2016-04-23 08:20:42 -0400
commit9fd81dd5ce0b12341c9f83346f8d32ac68bd3841 (patch)
tree6b189e45a9048f4c328f8f4ad80cd42ed899f1e6 /kernel/sched
parent1f41906a6fda1114debd3898668bd7ab6470ee41 (diff)
sched/fair: Optimize !CONFIG_NO_HZ_COMMON CPU load updates
Some code in CPU load update only concerns NO_HZ configs but it is built on all configurations. When NO_HZ isn't built, that code is harmless but just happens to take some useless resources in CPU and memory: 1) one useless field in struct rq 2) jiffies record on every tick that is never used (cpu_load_update_periodic) 3) decay_load_missed is called two times on every tick to eventually return immediately with no action taken. And that function is dead code. For pure optimization purposes, let's conditionally build the NO_HZ related code. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Byungchul Park <byungchul.park@lge.com> Cc: Chris Metcalf <cmetcalf@ezchip.com> Cc: Christoph Lameter <cl@linux.com> Cc: Luiz Capitulino <lcapitulino@redhat.com> Cc: Mike Galbraith <efault@gmx.de> Cc: Paul E . McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1461080211-16271-1-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/core.c5
-rw-r--r--kernel/sched/fair.c9
-rw-r--r--kernel/sched/sched.h6
3 files changed, 13 insertions, 7 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c98a2688f390..71dffbb27ce6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7381,8 +7381,6 @@ void __init sched_init(void)
7381 for (j = 0; j < CPU_LOAD_IDX_MAX; j++) 7381 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
7382 rq->cpu_load[j] = 0; 7382 rq->cpu_load[j] = 0;
7383 7383
7384 rq->last_load_update_tick = jiffies;
7385
7386#ifdef CONFIG_SMP 7384#ifdef CONFIG_SMP
7387 rq->sd = NULL; 7385 rq->sd = NULL;
7388 rq->rd = NULL; 7386 rq->rd = NULL;
@@ -7401,12 +7399,13 @@ void __init sched_init(void)
7401 7399
7402 rq_attach_root(rq, &def_root_domain); 7400 rq_attach_root(rq, &def_root_domain);
7403#ifdef CONFIG_NO_HZ_COMMON 7401#ifdef CONFIG_NO_HZ_COMMON
7402 rq->last_load_update_tick = jiffies;
7404 rq->nohz_flags = 0; 7403 rq->nohz_flags = 0;
7405#endif 7404#endif
7406#ifdef CONFIG_NO_HZ_FULL 7405#ifdef CONFIG_NO_HZ_FULL
7407 rq->last_sched_tick = 0; 7406 rq->last_sched_tick = 0;
7408#endif 7407#endif
7409#endif 7408#endif /* CONFIG_SMP */
7410 init_rq_hrtick(rq); 7409 init_rq_hrtick(rq);
7411 atomic_set(&rq->nr_iowait, 0); 7410 atomic_set(&rq->nr_iowait, 0);
7412 } 7411 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b70367a3e1ef..b8a33abce650 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4491,7 +4491,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
4491} 4491}
4492 4492
4493#ifdef CONFIG_SMP 4493#ifdef CONFIG_SMP
4494 4494#ifdef CONFIG_NO_HZ_COMMON
4495/* 4495/*
4496 * per rq 'load' arrray crap; XXX kill this. 4496 * per rq 'load' arrray crap; XXX kill this.
4497 */ 4497 */
@@ -4557,6 +4557,7 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
4557 } 4557 }
4558 return load; 4558 return load;
4559} 4559}
4560#endif /* CONFIG_NO_HZ_COMMON */
4560 4561
4561/** 4562/**
4562 * __cpu_load_update - update the rq->cpu_load[] statistics 4563 * __cpu_load_update - update the rq->cpu_load[] statistics
@@ -4596,7 +4597,7 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
4596static void cpu_load_update(struct rq *this_rq, unsigned long this_load, 4597static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
4597 unsigned long pending_updates) 4598 unsigned long pending_updates)
4598{ 4599{
4599 unsigned long tickless_load = this_rq->cpu_load[0]; 4600 unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
4600 int i, scale; 4601 int i, scale;
4601 4602
4602 this_rq->nr_load_updates++; 4603 this_rq->nr_load_updates++;
@@ -4609,6 +4610,7 @@ static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
4609 /* scale is effectively 1 << i now, and >> i divides by scale */ 4610 /* scale is effectively 1 << i now, and >> i divides by scale */
4610 4611
4611 old_load = this_rq->cpu_load[i]; 4612 old_load = this_rq->cpu_load[i];
4613#ifdef CONFIG_NO_HZ_COMMON
4612 old_load = decay_load_missed(old_load, pending_updates - 1, i); 4614 old_load = decay_load_missed(old_load, pending_updates - 1, i);
4613 if (tickless_load) { 4615 if (tickless_load) {
4614 old_load -= decay_load_missed(tickless_load, pending_updates - 1, i); 4616 old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
@@ -4619,6 +4621,7 @@ static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
4619 */ 4621 */
4620 old_load += tickless_load; 4622 old_load += tickless_load;
4621 } 4623 }
4624#endif
4622 new_load = this_load; 4625 new_load = this_load;
4623 /* 4626 /*
4624 * Round up the averaging division if load is increasing. This 4627 * Round up the averaging division if load is increasing. This
@@ -4731,8 +4734,10 @@ static inline void cpu_load_update_nohz(struct rq *this_rq,
4731 4734
4732static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load) 4735static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
4733{ 4736{
4737#ifdef CONFIG_NO_HZ_COMMON
4734 /* See the mess around cpu_load_update_nohz(). */ 4738 /* See the mess around cpu_load_update_nohz(). */
4735 this_rq->last_load_update_tick = READ_ONCE(jiffies); 4739 this_rq->last_load_update_tick = READ_ONCE(jiffies);
4740#endif
4736 cpu_load_update(this_rq, load, 1); 4741 cpu_load_update(this_rq, load, 1);
4737} 4742}
4738 4743
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 32d9e22cfacf..69da6fcaa0e8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -585,11 +585,13 @@ struct rq {
585#endif 585#endif
586 #define CPU_LOAD_IDX_MAX 5 586 #define CPU_LOAD_IDX_MAX 5
587 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 587 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
588 unsigned long last_load_update_tick;
589#ifdef CONFIG_NO_HZ_COMMON 588#ifdef CONFIG_NO_HZ_COMMON
589#ifdef CONFIG_SMP
590 unsigned long last_load_update_tick;
591#endif /* CONFIG_SMP */
590 u64 nohz_stamp; 592 u64 nohz_stamp;
591 unsigned long nohz_flags; 593 unsigned long nohz_flags;
592#endif 594#endif /* CONFIG_NO_HZ_COMMON */
593#ifdef CONFIG_NO_HZ_FULL 595#ifdef CONFIG_NO_HZ_FULL
594 unsigned long last_sched_tick; 596 unsigned long last_sched_tick;
595#endif 597#endif