author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2007-10-15 11:00:10 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:10 -0400
commit	ddc972975091ba5f839bf24d0f9ef54fe90ee741 (patch)
tree	438406a2930bdcb36c8a1bc6778aab49e1fba36d
parent	d822cecedad88b69a7d68aa8d49e1f238aa320c7 (diff)
sched debug: check spread
debug feature: check how well we schedule within a reasonable vruntime
'spread' range. (Note that CPU overload can increase the spread, so this
is not a hard condition, but normal loads should be within the spread.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
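For orientation, the heart of the patch is a simple threshold test on how far an entity's vruntime has drifted from the runqueue's min_vruntime. Below is a minimal user-space sketch of that test; the 20ms latency value is only an illustrative stand-in for the kernel's sysctl_sched_latency tunable, and the values in main() are made up for demonstration.

#include <stdio.h>

typedef long long s64;

/* Illustrative only: sysctl_sched_latency is a runtime tunable in the
 * kernel; 20ms (in nanoseconds) is just a plausible stand-in here. */
static const s64 sched_latency_ns = 20000000LL;

/* User-space mirror of the patch's check_spread(): flag an entity whose
 * vruntime has drifted more than 3 * sched_latency from min_vruntime. */
static int spread_exceeded(s64 vruntime, s64 min_vruntime)
{
	s64 d = vruntime - min_vruntime;

	if (d < 0)
		d = -d;

	return d > 3 * sched_latency_ns;
}

int main(void)
{
	/* 65ms apart: beyond 3 * 20ms, so flagged. */
	printf("%d\n", spread_exceeded(75000000LL, 10000000LL));
	/* 5ms apart: well inside the allowed spread. */
	printf("%d\n", spread_exceeded(15000000LL, 10000000LL));
	return 0;
}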
-rw-r--r--  kernel/sched.c        |  3 +++
-rw-r--r--  kernel/sched_debug.c  |  2 ++
-rw-r--r--  kernel/sched_fair.c   | 17 +++++++++++++++++
3 files changed, 22 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index fe1165b226a1..213294fdcd0f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -250,6 +250,9 @@ struct cfs_rq {
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
 	struct sched_entity *curr;
+
+	unsigned long nr_spread_over;
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
 
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index b24f17de19e3..4659c90c3418 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -140,6 +140,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %ld\n", "bkl_cnt",
 			rq->bkl_cnt);
 #endif
+	SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
+			cfs_rq->nr_spread_over);
 }
 
 static void print_cpu(struct seq_file *m, int cpu)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 87acc5cedd2d..8ea4c9b3e411 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -447,6 +447,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #endif
 }
 
+static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+	s64 d = se->vruntime - cfs_rq->min_vruntime;
+
+	if (d < 0)
+		d = -d;
+
+	if (d > 3*sysctl_sched_latency)
+		schedstat_inc(cfs_rq, nr_spread_over);
+#endif
+}
+
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
@@ -494,6 +507,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	}
 
 	update_stats_enqueue(cfs_rq, se);
+	check_spread(cfs_rq, se);
 	if (se != cfs_rq->curr)
 		__enqueue_entity(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
@@ -587,6 +601,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 	update_stats_curr_end(cfs_rq, prev);
 
+	check_spread(cfs_rq, prev);
 	if (prev->on_rq) {
 		update_stats_wait_start(cfs_rq, prev);
 		/* Put 'current' back into the tree. */
@@ -996,6 +1011,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	}
 
 	update_stats_enqueue(cfs_rq, se);
+	check_spread(cfs_rq, se);
+	check_spread(cfs_rq, curr);
 	__enqueue_entity(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 	resched_task(rq->curr);
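Since print_cfs_rq() feeds the /proc/sched_debug output (available when CONFIG_SCHED_DEBUG is enabled), the new nr_spread_over counter can be watched from user space. A minimal sketch that simply echoes the relevant lines follows; the exact output layout varies across kernel versions, so treat it as illustrative.

#include <stdio.h>
#include <string.h>

/* Print every line of /proc/sched_debug that mentions the new counter.
 * Requires a kernel built with CONFIG_SCHED_DEBUG=y. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sched_debug", "r");

	if (!f) {
		perror("/proc/sched_debug");
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "nr_spread_over"))
			fputs(line, stdout);
	}

	fclose(f);
	return 0;
}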