author	Gregory Haskins <ghaskins@novell.com>	2008-06-27 16:30:00 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-04 06:50:22 -0400
commit	2087a1ad822cd3a68b73338457047fcc54da726b (patch)
tree	1ebc45b2499034ffe37b29bf7a748b8e992cd870 /kernel/sched_fair.c
parent	c4acb2c0669c5c5c9b28e9d02a34b5c67edf7092 (diff)
sched: add avg-overlap support to RT tasks
We have the notion of tracking process-coupling (a.k.a. buddy-wake) via the
p->se.last_wakeup / p->se.avg_overlap facilities, but it is only used for
cfs-to-cfs interactions. There is no reason why an rt-to-cfs interaction
cannot share in establishing a relationship in a similar manner.

Because PREEMPT_RT runs many kernel threads at FIFO priority, we often
have heavy interaction between RT threads waking CFS applications. This
patch offers a substantial boost (50-60%+) in performance under those
circumstances.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Cc: npiggin@suse.de
Cc: rostedt@goodmis.org
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
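For context, the avg_overlap statistic the commit message refers to is a simple
exponential moving average with a 1/8 weight, maintained by the update_avg()
helper that this patch removes from sched_fair.c (so it can be shared beyond
CFS). Below is a minimal standalone sketch of how last_wakeup and avg_overlap
interact: update_avg() mirrors the helper visible in the diff, while the
struct, the note_*() helpers, and main() are illustrative assumptions, not
kernel code.

	#include <stdio.h>
	#include <stdint.h>

	typedef uint64_t u64;
	typedef int64_t s64;

	/* EMA with 1/8 weight, as in the kernel's update_avg() */
	static void update_avg(u64 *avg, u64 sample)
	{
		s64 diff = sample - *avg;
		*avg += diff >> 3;
	}

	/* Illustrative entity: only the fields this patch cares about */
	struct sched_entity_sketch {
		u64 sum_exec_runtime;	/* total runtime, in ns */
		u64 last_wakeup;	/* runtime stamp taken at wakeup */
		u64 avg_overlap;	/* avg overlap with the woken task */
	};

	/* At wakeup time: stamp the waker's current runtime
	 * (the se->last_wakeup assignment in check_preempt_wakeup()) */
	static void note_wakeup(struct sched_entity_sketch *se)
	{
		se->last_wakeup = se->sum_exec_runtime;
	}

	/* At dequeue-for-sleep: fold the runtime elapsed since the
	 * wakeup into the average (the update_avg_stats() helper) */
	static void note_sleep(struct sched_entity_sketch *se)
	{
		if (!se->last_wakeup)
			return;
		update_avg(&se->avg_overlap,
			   se->sum_exec_runtime - se->last_wakeup);
		se->last_wakeup = 0;
	}

	int main(void)
	{
		struct sched_entity_sketch se = { 0 };
		int i;

		/* Hypothetical waker: runs 100us past each wakeup */
		for (i = 0; i < 32; i++) {
			note_wakeup(&se);
			se.sum_exec_runtime += 100000;
			note_sleep(&se);
		}
		/* converges toward 100000 ns; a small value marks the
		 * waker/wakee pair as effectively synchronous */
		printf("avg_overlap = %llu ns\n",
		       (unsigned long long)se.avg_overlap);
		return 0;
	}

Once RT tasks maintain the same statistic, wake_affine() no longer needs to
restrict the buddy check to &fair_sched_class, which is exactly the test this
patch drops: both tasks are treated as synchronous whenever both averages fall
below sysctl_sched_migration_cost.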
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	21
1 file changed, 2 insertions(+), 19 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 2e43d4a748c3..f2aa987027d6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -726,21 +726,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 	__enqueue_entity(cfs_rq, se);
 }
 
-static void update_avg(u64 *avg, u64 sample)
-{
-	s64 diff = sample - *avg;
-	*avg += diff >> 3;
-}
-
-static void update_avg_stats(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	if (!se->last_wakeup)
-		return;
-
-	update_avg(&se->avg_overlap, se->sum_exec_runtime - se->last_wakeup);
-	se->last_wakeup = 0;
-}
-
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -751,7 +736,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
 	update_stats_dequeue(cfs_rq, se);
 	if (sleep) {
-		update_avg_stats(cfs_rq, se);
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
@@ -1196,9 +1180,9 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
 	 * a reasonable amount of time then attract this newly
 	 * woken task:
 	 */
-	if (sync && balanced && curr->sched_class == &fair_sched_class) {
+	if (sync && balanced) {
 		if (curr->se.avg_overlap < sysctl_sched_migration_cost &&
 		    p->se.avg_overlap < sysctl_sched_migration_cost)
 			return 1;
 	}
 
@@ -1359,7 +1343,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 		return;
 	}
 
-	se->last_wakeup = se->sum_exec_runtime;
 	if (unlikely(se == pse))
 		return;
 