author		Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:05 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:05 -0400
commit		2e09bf556fbe1a4cd8d837a3e6607de55f7cf4fd (patch)
tree		e73fc7738893ec5c50c3b2e626d4b2ed0809ddbe /kernel/sched_fair.c
parent		5c6b5964a0629bd39fbf4e5648a8aca32de5bcaf (diff)
sched: wakeup granularity increase
increase wakeup granularity - we were overscheduling a bit.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
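The granularity is the minimum vruntime gap, in nanoseconds, by which a freshly woken task must lead the running task before the wakeup is allowed to preempt; raising it from 1000000UL (1 ms) to 2000000UL (2 ms) means borderline wakeups now leave the current task running instead of forcing a context switch. Below is a minimal standalone sketch of the comparison this patch adds to check_preempt_wakeup(); the wakeup_preempts() helper and the main() harness are illustrative only, the kernel version operates on struct sched_entity and calls resched_task():

#include <stdio.h>

typedef long long s64;

/* the value this patch sets: 2 ms, in nanoseconds */
static const unsigned long sysctl_sched_wakeup_granularity = 2000000UL;

/*
 * Illustrative stand-in for the test added to check_preempt_wakeup():
 * the woken task preempts only when its vruntime is more than the
 * granularity below the current task's.
 */
static int wakeup_preempts(s64 curr_vruntime, s64 woken_vruntime)
{
	s64 delta = curr_vruntime - woken_vruntime;

	return delta > (s64)sysctl_sched_wakeup_granularity;
}

int main(void)
{
	/* a 1.5 ms lead preempted under the old 1 ms setting ... */
	printf("%d\n", wakeup_preempts(1500000, 0));	/* now 0: keep running */
	/* ... a 2.5 ms lead still does */
	printf("%d\n", wakeup_preempts(2500000, 0));	/* 1: reschedule */
	return 0;
}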
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	27
1 file changed, 11 insertions, 16 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 45c7493d8ca8..a60b1dac598a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -74,7 +74,7 @@ const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-const_debug unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;
 
 unsigned int sysctl_sched_runtime_limit __read_mostly;
 
@@ -582,7 +582,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
  * Preempt the current task with a newly woken task if needed:
  */
 static void
-__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
 	unsigned long ideal_runtime, delta_exec;
 
@@ -646,8 +646,6 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
 
 static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
-	struct sched_entity *next;
-
 	/*
 	 * Dequeue and enqueue the task to update its
 	 * position within the tree:
@@ -655,14 +653,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	dequeue_entity(cfs_rq, curr, 0);
 	enqueue_entity(cfs_rq, curr, 0);
 
-	/*
-	 * Reschedule if another task tops the current one.
-	 */
-	next = __pick_next_entity(cfs_rq);
-	if (next == curr)
-		return;
-
-	__check_preempt_curr_fair(cfs_rq, curr);
+	if (cfs_rq->nr_running > 1)
+		check_preempt_tick(cfs_rq, curr);
 }
 
 /**************************************************
@@ -852,7 +844,7 @@ static void yield_task_fair(struct rq *rq, struct task_struct *p)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
+static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 {
 	struct task_struct *curr = rq->curr;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
@@ -863,9 +855,12 @@ static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
 		resched_task(curr);
 		return;
 	}
+	if (is_same_group(curr, p)) {
+		s64 delta = curr->se.vruntime - p->se.vruntime;
 
-	if (is_same_group(curr, p))
-		__check_preempt_curr_fair(cfs_rq, &curr->se);
+		if (delta > (s64)sysctl_sched_wakeup_granularity)
+			resched_task(curr);
+	}
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
@@ -1095,7 +1090,7 @@ struct sched_class fair_sched_class __read_mostly = {
 	.dequeue_task		= dequeue_task_fair,
 	.yield_task		= yield_task_fair,
 
-	.check_preempt_curr	= check_preempt_curr_fair,
+	.check_preempt_curr	= check_preempt_wakeup,
 
 	.pick_next_task		= pick_next_task_fair,
 	.put_prev_task		= put_prev_task_fair,
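On the tick side the patch is a simplification as well as a rename: entity_tick() no longer calls __pick_next_entity() just to discover that the current task is still leftmost; it runs the slice check only when the runqueue holds more than one task, since with a single runnable entity there is nothing to preempt in favour of. A rough standalone sketch of that control flow follows; struct cfs_rq_stub and the empty check are stand-ins for the kernel's struct cfs_rq and check_preempt_tick():

struct cfs_rq_stub {
	unsigned long nr_running;	/* runnable entities on this queue */
};

/* stand-in for check_preempt_tick(), which compares the current
 * task's runtime against its ideal slice and reschedules if over */
static void check_preempt_tick_stub(struct cfs_rq_stub *cfs_rq)
{
	(void)cfs_rq;
}

static void entity_tick_stub(struct cfs_rq_stub *cfs_rq)
{
	/* alone on the queue: no one to preempt in favour of,
	 * so skip the ideal-runtime comparison entirely */
	if (cfs_rq->nr_running > 1)
		check_preempt_tick_stub(cfs_rq);
}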