Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/debug.c |  3
-rw-r--r--  kernel/sched/fair.c  | 51
-rw-r--r--  kernel/sched/sched.h | 10
3 files changed, 59 insertions, 5 deletions
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 4240abce4116..c953a89f94aa 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -94,6 +94,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
 #ifdef CONFIG_SMP
 	P(se->avg.runnable_avg_sum);
 	P(se->avg.runnable_avg_period);
+	P(se->avg.load_avg_contrib);
 #endif
 #undef PN
 #undef P
@@ -224,6 +225,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->load_contribution);
 	SEQ_printf(m, "  .%-30s: %d\n", "load_tg",
 			atomic_read(&cfs_rq->tg->load_weight));
+	SEQ_printf(m, "  .%-30s: %lld\n", "runnable_load_avg",
+			cfs_rq->runnable_load_avg);
 #endif
 
 	print_cfs_group_stats(m, cpu, cfs_rq->tg);
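With both hooks in place, the new statistics surface in /proc/sched_debug next to the existing per-entity averages. Under CONFIG_SMP the relevant lines look roughly like this (values are illustrative, not captured from a real run; the P() macro stringifies its argument, hence the "se->avg." prefixes):

  .se->avg.runnable_avg_sum      : 23000
  .se->avg.runnable_avg_period   : 46000
  .se->avg.load_avg_contrib      : 511
  ...
  .runnable_load_avg             : 1535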
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8c5468fcf10d..77af759e5675 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1081,20 +1081,63 @@ static __always_inline int __update_entity_runnable_avg(u64 now,
 	return decayed;
 }
 
+/* Compute the current contribution to load_avg by se, return any delta */
+static long __update_entity_load_avg_contrib(struct sched_entity *se)
+{
+	long old_contrib = se->avg.load_avg_contrib;
+
+	if (!entity_is_task(se))
+		return 0;
+
+	se->avg.load_avg_contrib = div64_u64(se->avg.runnable_avg_sum *
+					     se->load.weight,
+					     se->avg.runnable_avg_period + 1);
+
+	return se->avg.load_avg_contrib - old_contrib;
+}
+
 /* Update a sched_entity's runnable average */
 static inline void update_entity_load_avg(struct sched_entity *se)
 {
-	__update_entity_runnable_avg(rq_of(cfs_rq_of(se))->clock_task, &se->avg,
-				     se->on_rq);
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	long contrib_delta;
+
+	if (!__update_entity_runnable_avg(rq_of(cfs_rq)->clock_task, &se->avg,
+					  se->on_rq))
+		return;
+
+	contrib_delta = __update_entity_load_avg_contrib(se);
+	if (se->on_rq)
+		cfs_rq->runnable_load_avg += contrib_delta;
 }
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
 {
 	__update_entity_runnable_avg(rq->clock_task, &rq->avg, runnable);
 }
+
+/* Add the load generated by se into cfs_rq's child load-average */
+static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
+					   struct sched_entity *se)
+{
+	update_entity_load_avg(se);
+	cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
+}
+
+/* Remove se's load from this cfs_rq child load-average */
+static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
+					   struct sched_entity *se)
+{
+	update_entity_load_avg(se);
+	cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
+}
 #else
 static inline void update_entity_load_avg(struct sched_entity *se) {}
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
+static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
+					   struct sched_entity *se) {}
+static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
+					   struct sched_entity *se) {}
 #endif
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
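The formula above is the entity's decayed runnable fraction scaled by its weight: contrib = runnable_avg_sum * weight / (runnable_avg_period + 1), where the "+ 1" keeps the divisor non-zero before any period has accrued. A minimal userspace sketch of the same arithmetic (a standalone model with made-up inputs, not kernel code; plain 64-bit division stands in for div64_u64):

#include <stdint.h>
#include <stdio.h>

/* Model of __update_entity_load_avg_contrib()'s arithmetic. */
static uint64_t contrib(uint64_t runnable_avg_sum,
			uint64_t runnable_avg_period,
			uint64_t weight)
{
	return runnable_avg_sum * weight / (runnable_avg_period + 1);
}

int main(void)
{
	/* nice-0 weight is 1024; runnable half the time gives ~weight/2 */
	printf("%llu\n", (unsigned long long)contrib(23000, 46000, 1024));
	/* runnable all the time converges toward the full weight */
	printf("%llu\n", (unsigned long long)contrib(46000, 46000, 1024));
	return 0;
}

Because the decaying sum converges to a fixed maximum, the 64-bit product runnable_avg_sum * weight does not overflow for any task weight.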
@@ -1223,7 +1266,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 */
 	update_curr(cfs_rq);
 	update_cfs_load(cfs_rq, 0);
-	update_entity_load_avg(se);
+	enqueue_entity_load_avg(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 	update_cfs_shares(cfs_rq);
 
@@ -1298,7 +1341,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 	 * Update run-time statistics of the 'current'.
 	 */
 	update_curr(cfs_rq);
-	update_entity_load_avg(se);
+	dequeue_entity_load_avg(cfs_rq, se);
 
 	update_stats_dequeue(cfs_rq, se);
 	if (flags & DEQUEUE_SLEEP) {
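Taken together, enqueue/dequeue_entity_load_avg and the delta path in update_entity_load_avg maintain one invariant: cfs_rq->runnable_load_avg equals the sum of load_avg_contrib over the entities currently on the queue. A toy model of that bookkeeping (names echo the patch, but this is an illustrative standalone sketch, not the kernel's code):

#include <assert.h>

struct toy_se { long contrib; int on_rq; };

/* Recompute one entity's contribution; if it is queued, fold only
 * the delta into the aggregate so the sum stays consistent. */
static void toy_update(long *runnable_load_avg, struct toy_se *se,
		       long new_contrib)
{
	long delta = new_contrib - se->contrib;

	se->contrib = new_contrib;
	if (se->on_rq)
		*runnable_load_avg += delta;
}

static void toy_enqueue(long *runnable_load_avg, struct toy_se *se)
{
	se->on_rq = 1;
	*runnable_load_avg += se->contrib;
}

static void toy_dequeue(long *runnable_load_avg, struct toy_se *se)
{
	*runnable_load_avg -= se->contrib;
	se->on_rq = 0;
}

int main(void)
{
	long rla = 0;
	struct toy_se a = { 512, 0 }, b = { 1024, 0 };

	toy_enqueue(&rla, &a);
	toy_enqueue(&rla, &b);
	toy_update(&rla, &a, 600);	/* a's average drifted upward */
	toy_dequeue(&rla, &b);
	assert(rla == 600);		/* only a remains queued */
	return 0;
}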
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 14b571968713..e6539736af58 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -222,6 +222,15 @@ struct cfs_rq {
 	unsigned int nr_spread_over;
 #endif
 
+#ifdef CONFIG_SMP
+	/*
+	 * CFS Load tracking
+	 * Under CFS, load is tracked on a per-entity basis and aggregated up.
+	 * This allows for the description of both thread and group usage (in
+	 * the FAIR_GROUP_SCHED case).
+	 */
+	u64 runnable_load_avg;
+#endif
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
 
@@ -1214,4 +1223,3 @@ static inline u64 irq_time_read(int cpu)
 }
 #endif /* CONFIG_64BIT */
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
-
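As a rough calibration of the new field: each task's contribution converges toward weight * runnable-fraction, so with the customary nice-0 weight of 1024 a cfs_rq running two always-runnable nice-0 tasks plus one that is runnable about half the time settles near 1024 + 1024 + 512 = 2560. This is also why a u64 is comfortably wide enough for the aggregate.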