path: root/kernel/sched/proc.c
author	Alex Shi <alex.shi@intel.com>	2013-06-19 22:18:50 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-06-27 04:07:35 -0400
commit	b92486cbf2aa230d00f160664858495c81d2b37b (patch)
tree	293470662cb92417b45bb68bab46aa36f1ba27e0 /kernel/sched/proc.c
parent	83dfd5235ebd66c284b97befe6eabff7132333e6 (diff)
sched: Compute runnable load avg in cpu_load and cpu_avg_load_per_task
These are the base values used by the load balancer. Update them with the rq runnable load average so that load balancing naturally takes the runnable load avg into account.

We also tried counting blocked_load_avg as cpu load in balancing, but that caused a 6% kbuild performance drop on every Intel machine tested, and aim7/oltp regressions on some 4-socket machines. Adding blocked_load_avg only into get_rq_runnable_load still showed a small hackbench regression on NHM-EX.

Signed-off-by: Alex Shi <alex.shi@intel.com>
Reviewed-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1371694737-29336-7-git-send-email-alex.shi@intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
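The subject line also covers cpu_avg_load_per_task(), which lives in kernel/sched/fair.c and is therefore outside the diffstat below. As a hedged sketch of that side of the change (not part of the proc.c hunks shown on this page; the exact shape is assumed from the helpers added below), the balancer's per-CPU load helpers read the CFS runnable load average instead of the instantaneous rq->load.weight:

/*
 * Sketch only: under CONFIG_SMP the per-CPU load seen by the balancer
 * comes from the runnable load average tracked on the CFS runqueue.
 */
static unsigned long weighted_cpuload(const int cpu)
{
	return cpu_rq(cpu)->cfs.runnable_load_avg;
}

static unsigned long cpu_avg_load_per_task(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long nr_running = ACCESS_ONCE(rq->nr_running);

	/* Average runnable load per task; 0 when the rq has no tasks. */
	if (nr_running)
		return rq->cfs.runnable_load_avg / nr_running;

	return 0;
}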
Diffstat (limited to 'kernel/sched/proc.c')
-rw-r--r--	kernel/sched/proc.c	17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/proc.c b/kernel/sched/proc.c
index bb3a6a0b8623..ce5cd4892e43 100644
--- a/kernel/sched/proc.c
+++ b/kernel/sched/proc.c
@@ -501,6 +501,18 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
 	sched_avg_update(this_rq);
 }
 
+#ifdef CONFIG_SMP
+unsigned long get_rq_runnable_load(struct rq *rq)
+{
+	return rq->cfs.runnable_load_avg;
+}
+#else
+unsigned long get_rq_runnable_load(struct rq *rq)
+{
+	return rq->load.weight;
+}
+#endif
+
 #ifdef CONFIG_NO_HZ_COMMON
 /*
  * There is no sane way to deal with nohz on smp when using jiffies because the
@@ -522,7 +534,7 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
 void update_idle_cpu_load(struct rq *this_rq)
 {
 	unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
-	unsigned long load = this_rq->load.weight;
+	unsigned long load = get_rq_runnable_load(this_rq);
 	unsigned long pending_updates;
 
 	/*
@@ -568,11 +580,12 @@ void update_cpu_load_nohz(void)
  */
 void update_cpu_load_active(struct rq *this_rq)
 {
+	unsigned long load = get_rq_runnable_load(this_rq);
 	/*
 	 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
 	 */
 	this_rq->last_load_update_tick = jiffies;
-	__update_cpu_load(this_rq, this_rq->load.weight, 1);
+	__update_cpu_load(this_rq, load, 1);
 
 	calc_load_account_active(this_rq);
 }