author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-11-15 18:47:01 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-11-18 07:27:47 -0500
commit	3d4b47b4b040c9d77dd68104cfc1055d89a55afd (patch)
tree	a4b39b5d7c89a319b81543c1b26778d6220e772b /kernel/sched_fair.c
parent	2069dd75c7d0f49355939e5586daf5a9ab216db7 (diff)
sched: Implement on-demand (active) cfs_rq list
Make certain load-balance actions scale per number of active cgroups instead of the number of existing cgroups.

This makes wakeup/sleep paths more expensive, but is a win for systems where the vast majority of existing cgroups are idle.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234937.666535048@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
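At its core the patch is an on-demand membership pattern: a cfs_rq links itself onto its runqueue's leaf_cfs_rq_list when its first entity is enqueued, and is unlinked lazily, from the load-balance side, once it is empty and its load average has decayed below period/8. The following is a minimal, self-contained userspace C sketch of the same pattern; plain list pointers stand in for the kernel's RCU list primitives, the load-decay threshold is omitted, and the names (struct queue, queue_enqueue(), scan_prune()) are invented for illustration:

#include <stdio.h>

/* Minimal doubly-linked list, standing in for list_add_rcu()/list_del_rcu(). */
struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *head)
{
	head->prev = head->next = head;
}

static void list_add(struct list_node *n, struct list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/*
 * A queue is on the active list only while it has work, mirroring
 * cfs_rq->on_list and list_add_leaf_cfs_rq()/list_del_leaf_cfs_rq().
 */
struct queue {
	struct list_node link;
	int on_list;
	int nr_running;
};

/* Wakeup path: link on first enqueue (cf. enqueue_entity). */
static void queue_enqueue(struct queue *q, struct list_node *active)
{
	if (++q->nr_running == 1 && !q->on_list) {
		list_add(&q->link, active);
		q->on_list = 1;
	}
}

/* Sleep path: deliberately does NOT unlink (cf. dequeue_entity). */
static void queue_dequeue(struct queue *q)
{
	q->nr_running--;
}

/* Balance-side scan: lazily prune queues that went idle (cf. lb=1). */
static void scan_prune(struct queue *q)
{
	if (q->on_list && !q->nr_running) {
		list_del(&q->link);
		q->on_list = 0;
	}
}

int main(void)
{
	struct list_node active;
	struct queue q = { .on_list = 0, .nr_running = 0 };

	list_init(&active);
	queue_enqueue(&q, &active);
	printf("after enqueue: on_list=%d\n", q.on_list);	/* 1 */
	queue_dequeue(&q);
	printf("after dequeue: on_list=%d\n", q.on_list);	/* still 1 */
	scan_prune(&q);
	printf("after prune:   on_list=%d\n", q.on_list);	/* 0 */
	return 0;
}

Keeping the unlink on the scan side rather than in queue_dequeue() is the trade-off the changelog describes: the wakeup path pays one branch and a possible list add, a queue that briefly empties and refills never churns the list, and queues that stay idle simply drop out of the balance walk.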
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	46
1 file changed, 40 insertions(+), 6 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d86544b4151c..0560e72bd732 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -143,6 +143,24 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 	return cfs_rq->tg->cfs_rq[this_cpu];
 }
 
+static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+{
+	if (!cfs_rq->on_list) {
+		list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
+				&rq_of(cfs_rq)->leaf_cfs_rq_list);
+
+		cfs_rq->on_list = 1;
+	}
+}
+
+static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+{
+	if (cfs_rq->on_list) {
+		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
+		cfs_rq->on_list = 0;
+	}
+}
+
 /* Iterate thr' all leaf cfs_rq's on a runqueue */
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
@@ -246,6 +264,14 @@ static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
 	return &cpu_rq(this_cpu)->cfs;
 }
 
+static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+{
+}
+
+static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
+{
+}
+
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 	for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 
@@ -648,7 +674,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 #if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
-static void update_cfs_load(struct cfs_rq *cfs_rq)
+static void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
 {
 	u64 period = sched_avg_period();
 	u64 now, delta;
@@ -673,6 +699,11 @@ static void update_cfs_load(struct cfs_rq *cfs_rq)
 		cfs_rq->load_period /= 2;
 		cfs_rq->load_avg /= 2;
 	}
+
+	if (lb && !cfs_rq->nr_running) {
+		if (cfs_rq->load_avg < (period / 8))
+			list_del_leaf_cfs_rq(cfs_rq);
+	}
 }
 
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
@@ -719,7 +750,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq)
 	reweight_entity(cfs_rq_of(se), se, shares);
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
-static inline void update_cfs_load(struct cfs_rq *cfs_rq)
+static inline void update_cfs_load(struct cfs_rq *cfs_rq, int lb)
 {
 }
 
@@ -849,7 +880,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 0);
 	account_entity_enqueue(cfs_rq, se);
 	update_cfs_shares(cfs_rq);
 
@@ -863,6 +894,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (se != cfs_rq->curr)
 		__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
+
+	if (cfs_rq->nr_running == 1)
+		list_add_leaf_cfs_rq(cfs_rq);
 }
 
 static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -907,7 +941,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	if (se != cfs_rq->curr)
 		__dequeue_entity(cfs_rq, se);
 	se->on_rq = 0;
-	update_cfs_load(cfs_rq);
+	update_cfs_load(cfs_rq, 0);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
 	update_cfs_shares(cfs_rq);
@@ -1142,7 +1176,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq);
 	}
 
@@ -1172,7 +1206,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	for_each_sched_entity(se) {
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
-		update_cfs_load(cfs_rq);
+		update_cfs_load(cfs_rq, 0);
 		update_cfs_shares(cfs_rq);
 	}
 
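The lb=1 branch of update_cfs_load() has no caller inside kernel/sched_fair.c; in this series it is driven from the load-balance path in kernel/sched.c, which this diffstat excludes. Below is a sketch of the shape such a caller takes, assuming a per-cpu walker named update_shares(); the name and exact body are assumptions for illustration, not part of this diff:

/*
 * Assumed balance-side walker (sketch only; the real caller lives in
 * kernel/sched.c and is not shown in this diff). It is the one path
 * passing lb=1, so idle cfs_rq's with decayed load fall off the leaf
 * list here rather than on every sleep.
 */
static void update_shares(int cpu)
{
	struct cfs_rq *cfs_rq;
	struct rq *rq = cpu_rq(cpu);

	rcu_read_lock();
	for_each_leaf_cfs_rq(rq, cfs_rq)
		update_cfs_load(cfs_rq, 1);
	rcu_read_unlock();
}

The rcu_read_lock() here pairs with the list_for_each_entry_rcu() inside for_each_leaf_cfs_rq(): walkers may traverse the leaf list concurrently with wakeups adding entries, which is why the patch uses list_add_rcu()/list_del_rcu() for the on-demand add and delete.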