diff options
author | Ingo Molnar <mingo@elte.hu> | 2007-08-28 06:53:24 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2007-08-28 06:53:24 -0400 |
commit | f6cf891c4d7128f9f91243fc0b9ce99e10fa1586 (patch) | |
tree | ab26a8e708756c3fbafbb15ce48acea4f80ced08 | |
parent | 5f01d519e60a6ca1a7d9be9f2d73c5f521383992 (diff) |
sched: make the scheduler converge to the ideal latency
The de-HZ-ification of the granularity defaults unearthed a pre-existing
property of CFS: while it correctly converges to the granularity goal,
it does not prevent run-time fluctuations in the range of
[-gran ... 0 ... +gran].
With the increase of the granularity due to the removal of HZ
dependencies, this becomes visible in chew-max output (with 5 tasks
running):
out: 28 . 27. 32 | flu: 0 . 0 | ran: 9 . 13 | per: 37 . 40
out: 27 . 27. 32 | flu: 0 . 0 | ran: 17 . 13 | per: 44 . 40
out: 27 . 27. 32 | flu: 0 . 0 | ran: 9 . 13 | per: 36 . 40
out: 29 . 27. 32 | flu: 2 . 0 | ran: 17 . 13 | per: 46 . 40
out: 28 . 27. 32 | flu: 0 . 0 | ran: 9 . 13 | per: 37 . 40
out: 29 . 27. 32 | flu: 0 . 0 | ran: 18 . 13 | per: 47 . 40
out: 28 . 27. 32 | flu: 0 . 0 | ran: 9 . 13 | per: 37 . 40
The average slice is the ideal 13 msecs and the period is picture-perfect 40
msecs. But the 'ran' field fluctuates around 13.33 msecs and there's no
mechanism in CFS to keep that from happening: it's a perfectly valid
solution that CFS finds.
To fix this, we add a granularity/preemption rule that knows about
the "target latency", which makes tasks that run longer than the ideal
latency run a bit less. The simplest approach is to simply decrease the
preemption granularity when a task overruns its ideal latency. For this
we have to track how much the task executed since its last preemption.
( this adds a new field to task_struct, but we can eliminate that
overhead in 2.6.24 by putting all the scheduler timestamps into an
anonymous union. )
With this change in place, chew-max output is fluctuation-free all
around:
out: 28 . 27. 39 | flu: 0 . 2 | ran: 13 . 13 | per: 41 . 40
out: 28 . 27. 39 | flu: 0 . 2 | ran: 13 . 13 | per: 41 . 40
out: 28 . 27. 39 | flu: 0 . 2 | ran: 13 . 13 | per: 41 . 40
out: 28 . 27. 39 | flu: 0 . 2 | ran: 13 . 13 | per: 41 . 40
out: 28 . 27. 39 | flu: 0 . 1 | ran: 13 . 13 | per: 41 . 40
out: 28 . 27. 39 | flu: 0 . 1 | ran: 13 . 13 | per: 41 . 40
This patch has no impact on any fastpath or on any globally observable
scheduling property. (unless you have sharp enough eyes to see
millisecond-level ripples in glxgears smoothness :-)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
-rw-r--r-- | include/linux/sched.h | 1 | ||||
-rw-r--r-- | kernel/sched.c | 1 | ||||
-rw-r--r-- | kernel/sched_fair.c | 26 |
3 files changed, 24 insertions, 4 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index bd6a0320a770..f4e324ed2e44 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -904,6 +904,7 @@ struct sched_entity { | |||
904 | 904 | ||
905 | u64 exec_start; | 905 | u64 exec_start; |
906 | u64 sum_exec_runtime; | 906 | u64 sum_exec_runtime; |
907 | u64 prev_sum_exec_runtime; | ||
907 | u64 wait_start_fair; | 908 | u64 wait_start_fair; |
908 | u64 sleep_start_fair; | 909 | u64 sleep_start_fair; |
909 | 910 | ||
diff --git a/kernel/sched.c b/kernel/sched.c index 9fe473a190de..b533d6db78aa 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -1587,6 +1587,7 @@ static void __sched_fork(struct task_struct *p) | |||
1587 | p->se.wait_start_fair = 0; | 1587 | p->se.wait_start_fair = 0; |
1588 | p->se.exec_start = 0; | 1588 | p->se.exec_start = 0; |
1589 | p->se.sum_exec_runtime = 0; | 1589 | p->se.sum_exec_runtime = 0; |
1590 | p->se.prev_sum_exec_runtime = 0; | ||
1590 | p->se.delta_exec = 0; | 1591 | p->se.delta_exec = 0; |
1591 | p->se.delta_fair_run = 0; | 1592 | p->se.delta_fair_run = 0; |
1592 | p->se.delta_fair_sleep = 0; | 1593 | p->se.delta_fair_sleep = 0; |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 9f53d49f3aab..721fe7744874 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -668,7 +668,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep) | |||
668 | /* | 668 | /* |
669 | * Preempt the current task with a newly woken task if needed: | 669 | * Preempt the current task with a newly woken task if needed: |
670 | */ | 670 | */ |
671 | static void | 671 | static int |
672 | __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, | 672 | __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, |
673 | struct sched_entity *curr, unsigned long granularity) | 673 | struct sched_entity *curr, unsigned long granularity) |
674 | { | 674 | { |
@@ -679,8 +679,11 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, | |||
679 | * preempt the current task unless the best task has | 679 | * preempt the current task unless the best task has |
680 | * a larger than sched_granularity fairness advantage: | 680 | * a larger than sched_granularity fairness advantage: |
681 | */ | 681 | */ |
682 | if (__delta > niced_granularity(curr, granularity)) | 682 | if (__delta > niced_granularity(curr, granularity)) { |
683 | resched_task(rq_of(cfs_rq)->curr); | 683 | resched_task(rq_of(cfs_rq)->curr); |
684 | return 1; | ||
685 | } | ||
686 | return 0; | ||
684 | } | 687 | } |
685 | 688 | ||
686 | static inline void | 689 | static inline void |
@@ -725,6 +728,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) | |||
725 | 728 | ||
726 | static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | 729 | static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) |
727 | { | 730 | { |
731 | unsigned long gran, ideal_runtime, delta_exec; | ||
728 | struct sched_entity *next; | 732 | struct sched_entity *next; |
729 | 733 | ||
730 | /* | 734 | /* |
@@ -741,8 +745,22 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) | |||
741 | if (next == curr) | 745 | if (next == curr) |
742 | return; | 746 | return; |
743 | 747 | ||
744 | __check_preempt_curr_fair(cfs_rq, next, curr, | 748 | gran = sched_granularity(cfs_rq); |
745 | sched_granularity(cfs_rq)); | 749 | ideal_runtime = niced_granularity(curr, |
750 | max(sysctl_sched_latency / cfs_rq->nr_running, | ||
751 | (unsigned long)sysctl_sched_min_granularity)); | ||
752 | /* | ||
753 | * If we executed more than what the latency constraint suggests, | ||
754 | * reduce the rescheduling granularity. This way the total latency | ||
755 | * of how much a task is not scheduled converges to | ||
756 | * sysctl_sched_latency: | ||
757 | */ | ||
758 | delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; | ||
759 | if (delta_exec > ideal_runtime) | ||
760 | gran = 0; | ||
761 | |||
762 | if (__check_preempt_curr_fair(cfs_rq, next, curr, gran)) | ||
763 | curr->prev_sum_exec_runtime = curr->sum_exec_runtime; | ||
746 | } | 764 | } |
747 | 765 | ||
748 | /************************************************** | 766 | /************************************************** |