path: root/kernel/sched_fair.c
author     Paul Turner <pjt@google.com>                2010-11-15 18:47:04 -0500
committer  Ingo Molnar <mingo@elte.hu>                 2010-11-18 07:27:48 -0500
commit     e33078baa4d30ad1d0e46d1f62b9e5a63a3e6ee3 (patch)
tree       13d1a075c9cd41ce666cf21b0b209bd3d121f566 /kernel/sched_fair.c
parent     f0d7442a5924a802b66eef79b3708f77297bfb35 (diff)
sched: Fix update_cfs_load() synchronization
Using cfs_rq->nr_running is not sufficient to synchronize update_cfs_load with the put path since nr_running accounting occurs at deactivation.

It's also not safe to make the removal decision based on load_avg as this fails with both high periods and low shares. Resolve this by clipping history after 4 periods without activity.

Note: the above will always occur from update_shares() since in the last-task-sleep-case that task will still be cfs_rq->curr when update_cfs_load is called.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234937.933428187@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
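As an aside, below is a minimal user-space sketch of the clipping behaviour described above. It is not the kernel code: the toy_cfs_rq struct, the fixed 1-second averaging period and the plain halving loop (without the kernel's asm barrier) are simplified assumptions, but it shows why the accumulated load average collapses to zero once a queue has been idle for more than four averaging periods.

/*
 * Toy model of the history clipping in update_cfs_load(); field names
 * mirror struct cfs_rq, everything else is a simplification.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_AVG_PERIOD	1000000000ULL	/* assume a 1s averaging period */

struct toy_cfs_rq {
	unsigned long	load_weight;	/* stands in for cfs_rq->load.weight */
	uint64_t	load_stamp;	/* last time the averages were updated */
	uint64_t	load_last;	/* last time the queue carried load */
	uint64_t	load_period;
	uint64_t	load_avg;
};

static void toy_update_cfs_load(struct toy_cfs_rq *cfs_rq, uint64_t now)
{
	uint64_t period = TOY_AVG_PERIOD;
	uint64_t delta = now - cfs_rq->load_stamp;
	unsigned long load = cfs_rq->load_weight;

	/* truncate load history after 4 idle periods, as in the patch */
	if (cfs_rq->load_stamp > cfs_rq->load_last &&
	    now - cfs_rq->load_last > 4 * period) {
		cfs_rq->load_period = 0;
		cfs_rq->load_avg = 0;
	}

	cfs_rq->load_stamp = now;
	cfs_rq->load_period += delta;
	if (load) {
		cfs_rq->load_last = now;
		cfs_rq->load_avg += delta * load;
	}

	/* decay: halve the accumulated history once per full period */
	while (cfs_rq->load_period > period) {
		cfs_rq->load_period /= 2;
		cfs_rq->load_avg /= 2;
	}
}

int main(void)
{
	struct toy_cfs_rq rq = { .load_weight = 1024 };

	toy_update_cfs_load(&rq, 1 * TOY_AVG_PERIOD);	/* busy update */
	rq.load_weight = 0;				/* queue goes idle */
	toy_update_cfs_load(&rq, 2 * TOY_AVG_PERIOD);
	toy_update_cfs_load(&rq, 7 * TOY_AVG_PERIOD);	/* > 4 idle periods */
	printf("load_avg after long idle: %llu\n",
	       (unsigned long long)rq.load_avg);
	return 0;
}

In the patch itself this clipping is what allows the leaf-list removal to be decided by the simple !cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg test instead of the old load_avg < (period / 8) heuristic gated on the lb flag.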
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	33
1 files changed, 21 insertions, 12 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d52b97a04e7a..a543a5b202a4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -674,10 +674,11 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
-static void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
+static void update_cfs_load(struct cfs_rq *cfs_rq)
 {
 	u64 period = sched_avg_period();
 	u64 now, delta;
+	unsigned long load = cfs_rq->load.weight;
 
 	if (!cfs_rq)
 		return;
@@ -685,9 +686,19 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
 	now = rq_of(cfs_rq)->clock;
 	delta = now - cfs_rq->load_stamp;
 
+	/* truncate load history at 4 idle periods */
+	if (cfs_rq->load_stamp > cfs_rq->load_last &&
+	    now - cfs_rq->load_last > 4 * period) {
+		cfs_rq->load_period = 0;
+		cfs_rq->load_avg = 0;
+	}
+
 	cfs_rq->load_stamp = now;
 	cfs_rq->load_period += delta;
-	cfs_rq->load_avg += delta * cfs_rq->load.weight;
+	if (load) {
+		cfs_rq->load_last = now;
+		cfs_rq->load_avg += delta * load;
+	}
 
 	while (cfs_rq->load_period > period) {
 		/*
@@ -700,10 +711,8 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
 		cfs_rq->load_avg /= 2;
 	}
 
-	if (lb && !cfs_rq->nr_running) {
-		if (cfs_rq->load_avg < (period / 8))
-			list_del_leaf_cfs_rq(cfs_rq);
-	}
+	if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
+		list_del_leaf_cfs_rq(cfs_rq);
 }
 
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
@@ -750,7 +759,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
+static inline void update_cfs_load(struct cfs_rq *cfs_rq)
 {
 }
 
@@ -880,7 +889,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	update_cfs_load(cfs_rq, 0);
+	update_cfs_load(cfs_rq);
 	update_cfs_shares(cfs_rq, se->load.weight);
 	account_entity_enqueue(cfs_rq, se);
 
@@ -941,7 +950,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	se->on_rq = 0;
-	update_cfs_load(cfs_rq, 0);
+	update_cfs_load(cfs_rq);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
 	update_cfs_shares(cfs_rq, 0);
@@ -1176,7 +1185,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq, 0);
+		update_cfs_load(cfs_rq);
 		update_cfs_shares(cfs_rq, 0);
 	}
 
@@ -1206,7 +1215,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq, 0);
+		update_cfs_load(cfs_rq);
 		update_cfs_shares(cfs_rq, 0);
 	}
 
@@ -2023,7 +2032,7 @@ static int tg_shares_up(struct task_group *tg, int cpu)
 	raw_spin_lock_irqsave(&rq->lock, flags);
 
 	update_rq_clock(rq);
-	update_cfs_load(cfs_rq, 1);
+	update_cfs_load(cfs_rq);
 
 	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
 	load_avg -= cfs_rq->load_contribution;