author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2007-10-15 11:00:10 -0400
committer	Ingo Molnar <mingo@elte.hu>			2007-10-15 11:00:10 -0400
commit		ddc972975091ba5f839bf24d0f9ef54fe90ee741 (patch)
tree		438406a2930bdcb36c8a1bc6778aab49e1fba36d /kernel/sched_fair.c
parent		d822cecedad88b69a7d68aa8d49e1f238aa320c7 (diff)
sched debug: check spread
debug feature: check how well we schedule within a reasonable
vruntime 'spread' range. (note that CPU overload can increase
the spread, so this is not a hard condition, but normal loads
should be within the spread.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
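
[Note] The check introduced by this patch is plain absolute-distance arithmetic: an entity counts as "over spread" when its vruntime lies more than three scheduling-latency periods away from the runqueue's min_vruntime. Below is a minimal userspace sketch of the same test; the latency constant and the sample vruntimes are made up for illustration (the in-kernel sysctl_sched_latency is a runtime tunable; roughly 20ms in nanoseconds was a typical default of this era):

#include <stdio.h>
#include <stdint.h>

/* Illustrative value only; the real sysctl_sched_latency is runtime-tunable. */
static const int64_t sysctl_sched_latency = 20000000LL;	/* 20ms in ns */

/* Mirrors check_spread(): an entity is "over spread" when its vruntime
 * strays more than 3 * sysctl_sched_latency from min_vruntime. */
static int over_spread(int64_t vruntime, int64_t min_vruntime)
{
	int64_t d = vruntime - min_vruntime;

	if (d < 0)
		d = -d;

	return d > 3 * sysctl_sched_latency;
}

int main(void)
{
	/* Made-up sample vruntimes (ns), relative to min_vruntime = 0. */
	int64_t min_vruntime = 0;
	int64_t samples[] = { 1000000, 55000000, -70000000, 59999999, 60000001 };
	unsigned int nr_spread_over = 0;
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		if (over_spread(samples[i], min_vruntime))
			nr_spread_over++;

	/* Two samples (-70000000 and 60000001) exceed the 60ms threshold. */
	printf("nr_spread_over = %u\n", nr_spread_over);
	return 0;
}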
Diffstat (limited to 'kernel/sched_fair.c')
 kernel/sched_fair.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 87acc5cedd2d..8ea4c9b3e411 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -447,6 +447,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 #endif
 }
 
+static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+#ifdef CONFIG_SCHED_DEBUG
+	s64 d = se->vruntime - cfs_rq->min_vruntime;
+
+	if (d < 0)
+		d = -d;
+
+	if (d > 3*sysctl_sched_latency)
+		schedstat_inc(cfs_rq, nr_spread_over);
+#endif
+}
+
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 {
@@ -494,6 +507,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	}
 
 	update_stats_enqueue(cfs_rq, se);
+	check_spread(cfs_rq, se);
 	if (se != cfs_rq->curr)
 		__enqueue_entity(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
@@ -587,6 +601,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 	update_stats_curr_end(cfs_rq, prev);
 
+	check_spread(cfs_rq, prev);
 	if (prev->on_rq) {
 		update_stats_wait_start(cfs_rq, prev);
 		/* Put 'current' back into the tree. */
@@ -996,6 +1011,8 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	}
 
 	update_stats_enqueue(cfs_rq, se);
+	check_spread(cfs_rq, se);
+	check_spread(cfs_rq, curr);
 	__enqueue_entity(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 	resched_task(rq->curr);
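
[Note] The nr_spread_over counter bumped here is a schedstat field; on kernels of this series built with CONFIG_SCHED_DEBUG=y it is reported per cfs_rq in /proc/sched_debug (the field name and file layout are assumed from the sched_debug output of the same patch series, not shown in this diff). A small sketch that filters those lines:

#include <stdio.h>
#include <string.h>

/* Scan /proc/sched_debug (present when CONFIG_SCHED_DEBUG=y) and print
 * any per-cfs_rq nr_spread_over lines. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sched_debug", "r");

	if (!f) {
		perror("fopen /proc/sched_debug");
		return 1;
	}

	while (fgets(line, sizeof(line), f))
		if (strstr(line, "nr_spread_over"))
			fputs(line, stdout);

	fclose(f);
	return 0;
}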