aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@woody.linux-foundation.org>2007-08-31 13:52:00 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-08-31 13:52:00 -0400
commit5e7a39275b00ec881790ce47b8f7363fdfa097fa (patch)
tree89f8bbefe362f11c23ea47163f3f01f1035cccf5 /kernel
parent7d9ef601ddf0a42d11df3bdaaf28078fd2995eab (diff)
parent9f508f8258e18e9333f18daf1f0860df48d49ed2 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: clean up task_new_fair()
  sched: small schedstat fix
  sched: fix wait_start_fair condition in update_stats_wait_end()
  sched: call update_curr() in task_tick_fair()
  sched: make the scheduler converge to the ideal latency
  sched: fix sleeper bonus limit
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c1
-rw-r--r--kernel/sched_fair.c46
2 files changed, 36 insertions(+), 11 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 9fe473a190de..b533d6db78aa 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1587,6 +1587,7 @@ static void __sched_fork(struct task_struct *p)
1587 p->se.wait_start_fair = 0; 1587 p->se.wait_start_fair = 0;
1588 p->se.exec_start = 0; 1588 p->se.exec_start = 0;
1589 p->se.sum_exec_runtime = 0; 1589 p->se.sum_exec_runtime = 0;
1590 p->se.prev_sum_exec_runtime = 0;
1590 p->se.delta_exec = 0; 1591 p->se.delta_exec = 0;
1591 p->se.delta_fair_run = 0; 1592 p->se.delta_fair_run = 0;
1592 p->se.delta_fair_sleep = 0; 1593 p->se.delta_fair_sleep = 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ee3771850aaf..ce39282d9c0d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -354,7 +354,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
354 delta_fair = calc_delta_fair(delta_exec, lw); 354 delta_fair = calc_delta_fair(delta_exec, lw);
355 delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw); 355 delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
356 356
357 if (cfs_rq->sleeper_bonus > sysctl_sched_latency) { 357 if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
358 delta = min((u64)delta_mine, cfs_rq->sleeper_bonus); 358 delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
359 delta = min(delta, (unsigned long)( 359 delta = min(delta, (unsigned long)(
360 (long)sysctl_sched_runtime_limit - curr->wait_runtime)); 360 (long)sysctl_sched_runtime_limit - curr->wait_runtime));
@@ -489,6 +489,9 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
489{ 489{
490 unsigned long delta_fair; 490 unsigned long delta_fair;
491 491
492 if (unlikely(!se->wait_start_fair))
493 return;
494
492 delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit), 495 delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
493 (u64)(cfs_rq->fair_clock - se->wait_start_fair)); 496 (u64)(cfs_rq->fair_clock - se->wait_start_fair));
494 497
@@ -668,7 +671,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
668/* 671/*
669 * Preempt the current task with a newly woken task if needed: 672 * Preempt the current task with a newly woken task if needed:
670 */ 673 */
671static void 674static int
672__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se, 675__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
673 struct sched_entity *curr, unsigned long granularity) 676 struct sched_entity *curr, unsigned long granularity)
674{ 677{
@@ -679,8 +682,11 @@ __check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
679 * preempt the current task unless the best task has 682 * preempt the current task unless the best task has
680 * a larger than sched_granularity fairness advantage: 683 * a larger than sched_granularity fairness advantage:
681 */ 684 */
682 if (__delta > niced_granularity(curr, granularity)) 685 if (__delta > niced_granularity(curr, granularity)) {
683 resched_task(rq_of(cfs_rq)->curr); 686 resched_task(rq_of(cfs_rq)->curr);
687 return 1;
688 }
689 return 0;
684} 690}
685 691
686static inline void 692static inline void
@@ -725,6 +731,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
725 731
726static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) 732static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
727{ 733{
734 unsigned long gran, ideal_runtime, delta_exec;
728 struct sched_entity *next; 735 struct sched_entity *next;
729 736
730 /* 737 /*
@@ -741,8 +748,22 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
741 if (next == curr) 748 if (next == curr)
742 return; 749 return;
743 750
744 __check_preempt_curr_fair(cfs_rq, next, curr, 751 gran = sched_granularity(cfs_rq);
745 sched_granularity(cfs_rq)); 752 ideal_runtime = niced_granularity(curr,
753 max(sysctl_sched_latency / cfs_rq->nr_running,
754 (unsigned long)sysctl_sched_min_granularity));
755 /*
756 * If we executed more than what the latency constraint suggests,
757 * reduce the rescheduling granularity. This way the total latency
758 * of how much a task is not scheduled converges to
759 * sysctl_sched_latency:
760 */
761 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
762 if (delta_exec > ideal_runtime)
763 gran = 0;
764
765 if (__check_preempt_curr_fair(cfs_rq, next, curr, gran))
766 curr->prev_sum_exec_runtime = curr->sum_exec_runtime;
746} 767}
747 768
748/************************************************** 769/**************************************************
@@ -1076,31 +1097,34 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr)
1076static void task_new_fair(struct rq *rq, struct task_struct *p) 1097static void task_new_fair(struct rq *rq, struct task_struct *p)
1077{ 1098{
1078 struct cfs_rq *cfs_rq = task_cfs_rq(p); 1099 struct cfs_rq *cfs_rq = task_cfs_rq(p);
1079 struct sched_entity *se = &p->se; 1100 struct sched_entity *se = &p->se, *curr = cfs_rq_curr(cfs_rq);
1080 1101
1081 sched_info_queued(p); 1102 sched_info_queued(p);
1082 1103
1104 update_curr(cfs_rq);
1083 update_stats_enqueue(cfs_rq, se); 1105 update_stats_enqueue(cfs_rq, se);
1084 /* 1106 /*
1085 * Child runs first: we let it run before the parent 1107 * Child runs first: we let it run before the parent
1086 * until it reschedules once. We set up the key so that 1108 * until it reschedules once. We set up the key so that
1087 * it will preempt the parent: 1109 * it will preempt the parent:
1088 */ 1110 */
1089 p->se.fair_key = current->se.fair_key - 1111 se->fair_key = curr->fair_key -
1090 niced_granularity(&rq->curr->se, sched_granularity(cfs_rq)) - 1; 1112 niced_granularity(curr, sched_granularity(cfs_rq)) - 1;
1091 /* 1113 /*
1092 * The first wait is dominated by the child-runs-first logic, 1114 * The first wait is dominated by the child-runs-first logic,
1093 * so do not credit it with that waiting time yet: 1115 * so do not credit it with that waiting time yet:
1094 */ 1116 */
1095 if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL) 1117 if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
1096 p->se.wait_start_fair = 0; 1118 se->wait_start_fair = 0;
1097 1119
1098 /* 1120 /*
1099 * The statistical average of wait_runtime is about 1121 * The statistical average of wait_runtime is about
1100 * -granularity/2, so initialize the task with that: 1122 * -granularity/2, so initialize the task with that:
1101 */ 1123 */
1102 if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) 1124 if (sysctl_sched_features & SCHED_FEAT_START_DEBIT) {
1103 p->se.wait_runtime = -(sched_granularity(cfs_rq) / 2); 1125 se->wait_runtime = -(sched_granularity(cfs_rq) / 2);
1126 schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
1127 }
1104 1128
1105 __enqueue_entity(cfs_rq, se); 1129 __enqueue_entity(cfs_rq, se);
1106} 1130}