diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2007-09-05 08:32:49 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2007-09-05 08:32:49 -0400 |
commit | 1169783085adb9ac969d21103a6885e8435f7ed3 (patch) | |
tree | 8d5313ef18ba821436aa838cb6f03f065049ef21 /kernel/sched_fair.c | |
parent | 4a55b45036a677fac43fe81ddf7fdcd007aaaee7 (diff) |
sched: fix ideal_runtime calculations for reniced tasks
fix ideal_runtime:
- do not scale it using niced_granularity()
it is against sum_exec_delta, so it's wall-time, not fair-time.
- move the whole check into __check_preempt_curr_fair()
so that wakeup preemption can also benefit from the new logic.
this also results in code size reduction:
text data bss dec hex filename
13391 228 1204 14823 39e7 sched.o.before
13369 228 1204 14801 39d1 sched.o.after
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 38 |
1 file changed, 22 insertions, 16 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 2d01bbc2d04a..892616bf2c77 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -678,11 +678,31 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, | |||
678 | struct sched_entity *curr, unsigned long granularity) | 678 | struct sched_entity *curr, unsigned long granularity) |
679 | { | 679 | { |
680 | s64 __delta = curr->fair_key - se->fair_key; | 680 | s64 __delta = curr->fair_key - se->fair_key; |
681 | unsigned long ideal_runtime, delta_exec; | ||
682 | |||
683 | /* | ||
684 | * ideal_runtime is compared against sum_exec_runtime, which is | ||
685 | * walltime, hence do not scale. | ||
686 | */ | ||
687 | ideal_runtime = max(sysctl_sched_latency / cfs_rq->nr_running, | ||
688 | (unsigned long)sysctl_sched_min_granularity); | ||
689 | |||
690 | /* | ||
691 | * If we executed more than what the latency constraint suggests, | ||
692 | * reduce the rescheduling granularity. This way the total latency | ||
693 | * of how much a task is not scheduled converges to | ||
694 | * sysctl_sched_latency: | ||
695 | */ | ||
696 | delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; | ||
697 | if (delta_exec > ideal_runtime) | ||
698 | granularity = 0; | ||
681 | 699 | ||
682 | /* | 700 | /* |
683 | * Take scheduling granularity into account - do not | 701 | * Take scheduling granularity into account - do not |
684 | * preempt the current task unless the best task has | 702 | * preempt the current task unless the best task has |
685 | * a larger than sched_granularity fairness advantage: | 703 | * a larger than sched_granularity fairness advantage: |
704 | * | ||
705 | * scale granularity as key space is in fair_clock. | ||
686 | */ | 706 | */ |
687 | if (__delta > niced_granularity(curr, granularity)) | 707 | if (__delta > niced_granularity(curr, granularity)) |
688 | resched_task(rq_of(cfs_rq)->curr); | 708 | resched_task(rq_of(cfs_rq)->curr); |
@@ -731,7 +751,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) | |||
731 | 751 | ||
732 | static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | 752 | static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) |
733 | { | 753 | { |
734 | unsigned long gran, ideal_runtime, delta_exec; | ||
735 | struct sched_entity *next; | 754 | struct sched_entity *next; |
736 | 755 | ||
737 | /* | 756 | /* |
@@ -748,21 +767,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | |||
748 | if (next == curr) | 767 | if (next == curr) |
749 | return; | 768 | return; |
750 | 769 | ||
751 | gran = sched_granularity(cfs_rq); | 770 | __check_preempt_curr_fair(cfs_rq, next, curr, |
752 | ideal_runtime = niced_granularity(curr, | 771 | sched_granularity(cfs_rq)); |
753 | max(sysctl_sched_latency / cfs_rq->nr_running, | ||
754 | (unsigned long)sysctl_sched_min_granularity)); | ||
755 | /* | ||
756 | * If we executed more than what the latency constraint suggests, | ||
757 | * reduce the rescheduling granularity. This way the total latency | ||
758 | * of how much a task is not scheduled converges to | ||
759 | * sysctl_sched_latency: | ||
760 | */ | ||
761 | delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; | ||
762 | if (delta_exec > ideal_runtime) | ||
763 | gran = 0; | ||
764 | |||
765 | __check_preempt_curr_fair(cfs_rq, next, curr, gran); | ||
766 | } | 772 | } |
767 | 773 | ||
768 | /************************************************** | 774 | /************************************************** |