aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched_fair.c
diff options
context:
space:
mode:
authorPaul Turner <pjt@google.com>2010-11-15 18:47:08 -0500
committerIngo Molnar <mingo@elte.hu>2010-11-18 07:27:49 -0500
commit3b3d190ec3683d568fd2ebaead5e1ec7f97b6e37 (patch)
tree6839bc473200dcb69c5de998921684ac569ce18b /kernel/sched_fair.c
parentc66eaf619c0c7937e9ded160ae83b5a7a6b19b56 (diff)
sched: Implement demand based update_cfs_load()
When the system is busy, dilation of rq->next_balance makes lb->update_shares() insufficiently frequent for threads which don't sleep (no dequeue/enqueue updates). Adjust for this by making demand based updates based on the accumulation of execution time sufficient to wrap our averaging window. Signed-off-by: Paul Turner <pjt@google.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <20101115234938.291159744@google.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--kernel/sched_fair.c12
1 file changed, 12 insertions, 0 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 33f941dcf88c..e7e2f08e6d01 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -539,6 +539,9 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
539 return calc_delta_fair(sched_slice(cfs_rq, se), se); 539 return calc_delta_fair(sched_slice(cfs_rq, se), se);
540} 540}
541 541
542static void update_cfs_load(struct cfs_rq *cfs_rq);
543static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
544
542/* 545/*
543 * Update the current task's runtime statistics. Skip current tasks that 546 * Update the current task's runtime statistics. Skip current tasks that
544 * are not in our scheduling class. 547 * are not in our scheduling class.
@@ -558,6 +561,14 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
558 561
559 curr->vruntime += delta_exec_weighted; 562 curr->vruntime += delta_exec_weighted;
560 update_min_vruntime(cfs_rq); 563 update_min_vruntime(cfs_rq);
564
565#ifdef CONFIG_FAIR_GROUP_SCHED
566 cfs_rq->load_unacc_exec_time += delta_exec;
567 if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
568 update_cfs_load(cfs_rq);
569 update_cfs_shares(cfs_rq, 0);
570 }
571#endif
561} 572}
562 573
563static void update_curr(struct cfs_rq *cfs_rq) 574static void update_curr(struct cfs_rq *cfs_rq)
@@ -713,6 +724,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq)
713 } 724 }
714 725
715 cfs_rq->load_stamp = now; 726 cfs_rq->load_stamp = now;
727 cfs_rq->load_unacc_exec_time = 0;
716 cfs_rq->load_period += delta; 728 cfs_rq->load_period += delta;
717 if (load) { 729 if (load) {
718 cfs_rq->load_last = now; 730 cfs_rq->load_last = now;