author	Paul Turner <pjt@google.com>	2010-11-15 18:47:09 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-11-18 07:27:50 -0500
commit	d6b5591829bd348a5fbe1c428d28dea00621cdba (patch)
tree	d8f8131c4d8033685b2e77bffee6cdcf0a975d38 /kernel
parent	3b3d190ec3683d568fd2ebaead5e1ec7f97b6e37 (diff)
sched: Allow update_cfs_load() to update global load
Refactor the global load updates from update_shares_cpu() so that
update_cfs_load() can update global load when it is more than ~10%
out of sync.
The new global_update parameter allows us to force an update, regardless of
the error factor, so that we can synchronize with update_shares().
Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.377473595@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
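
The policy this patch introduces is small enough to demonstrate outside the kernel. Below is a minimal standalone C sketch, not kernel code: the structs, the plain long standing in for the atomic tg->load_weight, and the driver in main() are simplified stand-ins; only the threshold and forced-update behaviour mirror the update_cfs_rq_load_contribution() helper introduced in the diff below.

/*
 * Standalone sketch (not kernel code): simplified stand-ins for
 * cfs_rq/task_group state; only the update policy mirrors the patch.
 */
#include <stdio.h>
#include <stdlib.h>

struct tg_stub { long load_weight; };	/* stands in for atomic tg->load_weight */

struct cfs_stub {
	struct tg_stub *tg;
	unsigned long long load_avg;	/* decayed load sum over load_period */
	unsigned long long load_period;
	long load_contribution;		/* last value folded into tg->load_weight */
};

static void update_contribution(struct cfs_stub *cfs, int global_update)
{
	long load_avg = (long)(cfs->load_avg / (cfs->load_period + 1));
	long delta = load_avg - cfs->load_contribution;

	/* Publish only when forced, or when local load has drifted by more
	 * than 1/8 of what was last reported to the group. */
	if (global_update || labs(delta) > cfs->load_contribution / 8) {
		cfs->tg->load_weight += delta;
		cfs->load_contribution += delta;
	}
}

int main(void)
{
	struct tg_stub tg = { 0 };
	struct cfs_stub cfs = { &tg, 1000, 0, 0 };

	update_contribution(&cfs, 0);	/* delta 1000 > 0/8: published */
	printf("tg=%ld contribution=%ld\n", tg.load_weight, cfs.load_contribution);

	cfs.load_avg = 1050;		/* ~5% drift: below the 1/8 threshold, skipped */
	update_contribution(&cfs, 0);
	printf("tg=%ld contribution=%ld\n", tg.load_weight, cfs.load_contribution);

	update_contribution(&cfs, 1);	/* forced, as from update_shares_cpu() */
	printf("tg=%ld contribution=%ld\n", tg.load_weight, cfs.load_contribution);
	return 0;
}

The 1/8 comparison is the "~10%" error factor mentioned above (12.5% strictly), and passing 1 for global_update is how update_shares_cpu() forces synchronization in the final hunk, which drops its open-coded version of this logic.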
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_fair.c	44
1 file changed, 29 insertions(+), 15 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index e7e2f08e6d01..390ce30ff2d0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -539,7 +539,7 @@ static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return calc_delta_fair(sched_slice(cfs_rq, se), se);
 }
 
-static void update_cfs_load(struct cfs_rq *cfs_rq);
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
 static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
 
 /*
@@ -565,7 +565,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	cfs_rq->load_unacc_exec_time += delta_exec;
 	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq, 0);
 	}
 #endif
@@ -704,7 +704,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
-static void update_cfs_load(struct cfs_rq *cfs_rq)
+static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
+					    int global_update)
+{
+	struct task_group *tg = cfs_rq->tg;
+	long load_avg;
+
+	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
+	load_avg -= cfs_rq->load_contribution;
+
+	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
+		atomic_add(load_avg, &tg->load_weight);
+		cfs_rq->load_contribution += load_avg;
+	}
+}
+
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 	u64 period = sysctl_sched_shares_window;
 	u64 now, delta;
@@ -731,6 +746,11 @@ static void update_cfs_load(struct cfs_rq *cfs_rq)
 		cfs_rq->load_avg += delta * load;
 	}
 
+	/* consider updating load contribution on each fold or truncate */
+	if (global_update || cfs_rq->load_period > period
+	    || !cfs_rq->load_period)
+		update_cfs_rq_load_contribution(cfs_rq, global_update);
+
 	while (cfs_rq->load_period > period) {
 		/*
 		 * Inline assembly required to prevent the compiler
@@ -790,7 +810,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_load(struct cfs_rq *cfs_rq)
+static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 }
 
@@ -920,7 +940,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 0);
 	update_cfs_shares(cfs_rq, se->load.weight);
 	account_entity_enqueue(cfs_rq, se);
 
@@ -981,7 +1001,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	se->on_rq = 0;
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 0);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
 	update_cfs_shares(cfs_rq, 0);
@@ -1216,7 +1236,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq, 0);
 	}
 
@@ -1246,7 +1266,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq, 0);
 	}
 
@@ -2052,7 +2072,6 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
 	struct cfs_rq *cfs_rq;
 	unsigned long flags;
 	struct rq *rq;
-	long load_avg;
 
 	if (!tg->se[cpu])
 		return 0;
@@ -2063,12 +2082,7 @@ static int update_shares_cpu(struct task_group *tg, int cpu)
 	raw_spin_lock_irqsave(&rq->lock, flags);
 
 	update_rq_clock(rq);
-	update_cfs_load(cfs_rq);
-
-	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
-	load_avg -= cfs_rq->load_contribution;
-	atomic_add(load_avg, &tg->load_weight);
-	cfs_rq->load_contribution += load_avg;
+	update_cfs_load(cfs_rq, 1);
 
 	/*
 	 * We need to update shares after updating tg->load_weight in