author		Paul Turner <pjt@google.com>	2010-11-15 18:47:03 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-11-18 07:27:47 -0500
commit		f0d7442a5924a802b66eef79b3708f77297bfb35 (patch)
tree		edffcb20dca3dfe7315b128dc8948977c7c27fa8 /kernel/sched_fair.c
parent		9e3081ca61147b29f52fddb4f7c6b6b82ea5eb7a (diff)
sched: Fix load corruption from update_cfs_shares()
As part of enqueue_entity both a new entity weight and its contribution to the
queuing cfs_rq / rq are updated. Since update_cfs_shares will only update the
queueing weights when the entity is on_rq (which in this case it is not yet),
there's a dependency loop here:

  update_cfs_shares needs account_entity_enqueue to update cfs_rq->load.weight
  account_entity_enqueue needs the updated weight for the queuing cfs_rq load[*]

Fix this and avoid spurious dequeue/enqueues by issuing update_cfs_shares as
if we had accounted the enqueue already.

This was also resulting in rq->load corruption previously.

[*]: this dependency also exists when using the group cfs_rq w/
     update_cfs_shares as the weight of the enqueued entity changes
     without the load being updated.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234937.844900206@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
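The ordering change in enqueue_entity() below is the heart of the fix. As a
minimal sketch (the structures here are simplified stand-ins, not the real
kernel types, which carry far more state), the dependency loop and its
resolution look like this:

/* Simplified stand-in types for illustration only. */
struct load { long weight; };
struct entity { struct load load; };
struct cfs_rq { struct load load; };

/* Folds the entity's weight into the queue's load. */
static void account_entity_enqueue(struct cfs_rq *cfs_rq, struct entity *se)
{
	cfs_rq->load.weight += se->load.weight;
}

/*
 * Recomputes group shares from the queue load. Before the patch this read
 * cfs_rq->load.weight directly; during enqueue that value did not yet
 * include the new entity, so shares were computed from stale load. The
 * weight_delta argument lets a caller pre-account a pending enqueue.
 */
static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
{
	long load = cfs_rq->load.weight + weight_delta;
	/* ... reweight the group entity based on 'load' ... */
	(void)load;
}

static void enqueue_entity(struct cfs_rq *cfs_rq, struct entity *se)
{
	/* Fixed ordering: shares see the enqueue as if already accounted. */
	update_cfs_shares(cfs_rq, se->load.weight);
	account_entity_enqueue(cfs_rq, se);
}

All other call sites in the patch pass a weight_delta of 0, since there the
queue's load is already up to date when shares are recomputed.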
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 46ff6587dc16..d52b97a04e7a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -718,7 +718,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 	account_entity_enqueue(cfs_rq, se);
 }
 
-static void update_cfs_shares(struct cfs_rq *cfs_rq)
+static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 {
 	struct task_group *tg;
 	struct sched_entity *se;
@@ -732,7 +732,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq)
 	if (!se)
 		return;
 
-	load = cfs_rq->load.weight;
+	load = cfs_rq->load.weight + weight_delta;
 
 	load_weight = atomic_read(&tg->load_weight);
 	load_weight -= cfs_rq->load_contribution;
@@ -754,7 +754,7 @@ static inline void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
 {
 }
 
-static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
+static inline void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 {
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -881,8 +881,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 */
 	update_curr(cfs_rq);
 	update_cfs_load(cfs_rq, 0);
+	update_cfs_shares(cfs_rq, se->load.weight);
 	account_entity_enqueue(cfs_rq, se);
-	update_cfs_shares(cfs_rq);
 
 	if (flags & ENQUEUE_WAKEUP) {
 		place_entity(cfs_rq, se, 0);
@@ -944,7 +944,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	update_cfs_load(cfs_rq, 0);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
-	update_cfs_shares(cfs_rq);
+	update_cfs_shares(cfs_rq, 0);
 
 	/*
 	 * Normalize the entity after updating the min_vruntime because the
@@ -1177,7 +1177,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq);
+		update_cfs_shares(cfs_rq, 0);
 	}
 
 	hrtick_update(rq);
@@ -1207,7 +1207,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq);
+		update_cfs_shares(cfs_rq, 0);
 	}
 
 	hrtick_update(rq);
@@ -2034,7 +2034,7 @@ static int tg_shares_up(struct task_group *tg, int cpu)
 	 * We need to update shares after updating tg->load_weight in
 	 * order to adjust the weight of groups with long running tasks.
 	 */
-	update_cfs_shares(cfs_rq);
+	update_cfs_shares(cfs_rq, 0);
 
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 