author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-08-24 19:11:13 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-08-24 19:11:13 -0400
commit     d1caeb02b17c6bc215a9a40a98a1beb92dcbd310 (patch)
tree       4ed5e724ce073c270fb9e4d8d9cb665b826ff111 /kernel/sched_fair.c
parent     37a3cc99d5048df62bb201c0b45a51ba94497e45 (diff)
parent     095e56c7036fe97bc3ebcd80ed6e121be0847656 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched: fix startup penalty calculation
  sched: simplify bonus calculation #2
  sched: simplify bonus calculation #1
  sched: tidy up and simplify the bonus balance
  sched: optimize task_tick_rt() a bit
  sched: simplify can_migrate_task()
  sched: remove HZ dependency from the granularity default
  sched: CONFIG_SCHED_GROUP_FAIR=y fixlet
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  35
1 file changed, 19 insertions(+), 16 deletions(-)
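
One of the merged changes, "sched: remove HZ dependency from the granularity default",
replaces the HZ-scaled defaults in the hunks below with fixed nanosecond constants.
As a quick stand-alone sanity check (not kernel code; the HZ values are just common
config choices), the old expressions only match their documented defaults at HZ=1000:

#include <stdio.h>

int main(void)
{
	unsigned int hz_values[] = { 100, 250, 1000 };

	for (int i = 0; i < 3; i++) {
		unsigned int HZ = hz_values[i];

		printf("HZ=%4u: old granularity %11llu ns, batch wakeup %11llu ns, wakeup %10llu ns\n",
		       HZ,
		       2000000000ULL / HZ,	/* old sysctl_sched_granularity */
		       10000000000ULL / HZ,	/* old sysctl_sched_batch_wakeup_granularity */
		       1000000000ULL / HZ);	/* old sysctl_sched_wakeup_granularity */
	}
	/* The new defaults are HZ-independent: 10000000, 25000000 and 1000000 ns. */
	return 0;
}
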
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index fedbb51bba96..4d6b7e2df2aa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -19,7 +19,7 @@
 
 /*
  * Preemption granularity:
- * (default: 2 msec, units: nanoseconds)
+ * (default: 10 msec, units: nanoseconds)
  *
  * NOTE: this granularity value is not the same as the concept of
  * 'timeslice length' - timeslices in CFS will typically be somewhat
@@ -31,18 +31,17 @@
  * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
  * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
  */
-unsigned int sysctl_sched_granularity __read_mostly = 2000000000ULL/HZ;
+unsigned int sysctl_sched_granularity __read_mostly = 10000000UL;
 
 /*
  * SCHED_BATCH wake-up granularity.
- * (default: 10 msec, units: nanoseconds)
+ * (default: 25 msec, units: nanoseconds)
  *
  * This option delays the preemption effects of decoupled workloads
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly =
-							10000000000ULL/HZ;
+unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;
 
 /*
  * SCHED_OTHER wake-up granularity.
@@ -52,12 +51,12 @@ unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly =
  * and reduces their over-scheduling. Synchronous workloads will still
  * have immediate wakeup/sleep latencies.
  */
-unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000000ULL/HZ;
+unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000UL;
 
 unsigned int sysctl_sched_stat_granularity __read_mostly;
 
 /*
- * Initialized in sched_init_granularity():
+ * Initialized in sched_init_granularity() [to 5 times the base granularity]:
  */
 unsigned int sysctl_sched_runtime_limit __read_mostly;
 
@@ -304,9 +303,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
 
 	if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) {
-		delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec);
-		delta = calc_delta_mine(delta, curr->load.weight, lw);
-		delta = min((u64)delta, cfs_rq->sleeper_bonus);
+		delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
+		delta = min(delta, (unsigned long)(
+			(long)sysctl_sched_runtime_limit - curr->wait_runtime));
 		cfs_rq->sleeper_bonus -= delta;
 		delta_mine -= delta;
 	}
@@ -494,6 +493,13 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	unsigned long load = cfs_rq->load.weight, delta_fair;
 	long prev_runtime;
 
+	/*
+	 * Do not boost sleepers if there's too much bonus 'in flight'
+	 * already:
+	 */
+	if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
+		return;
+
 	if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
 		load = rq_of(cfs_rq)->cpu_load[2];
 
@@ -513,16 +519,13 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 	prev_runtime = se->wait_runtime;
 	__add_wait_runtime(cfs_rq, se, delta_fair);
+	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 	delta_fair = se->wait_runtime - prev_runtime;
 
 	/*
 	 * Track the amount of bonus we've given to sleepers:
 	 */
 	cfs_rq->sleeper_bonus += delta_fair;
-	if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
-		cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit;
-
-	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
 }
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -1044,7 +1047,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 	 * -granularity/2, so initialize the task with that:
 	 */
 	if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
-		p->se.wait_runtime = -(sysctl_sched_granularity / 2);
+		p->se.wait_runtime = -((long)sysctl_sched_granularity / 2);
 
 	__enqueue_entity(cfs_rq, se);
 }
@@ -1057,7 +1060,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
  */
 static void set_curr_task_fair(struct rq *rq)
 {
-	struct sched_entity *se = &rq->curr.se;
+	struct sched_entity *se = &rq->curr->se;
 
 	for_each_sched_entity(se)
 		set_next_entity(cfs_rq_of(se), se);
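
The __enqueue_sleeper() hunks above change how the sleeper bonus is throttled: instead
of granting the boost and clamping the accumulated sleeper_bonus to
sysctl_sched_runtime_limit afterwards, the new code bails out early and grants no boost
at all once too much bonus is already 'in flight'. A minimal user-space sketch of just
that accounting difference (a toy model, not kernel code; the limit value is assumed
from the updated comment, i.e. 5 times the 10 ms granularity):

#include <stdio.h>

/* Assumed per the updated comment: sched_init_granularity() sets the
 * runtime limit to 5 times the base granularity, i.e. 5 * 10 ms. */
static const unsigned long runtime_limit = 50000000UL;	/* ns */

/* Old behaviour: always add the bonus, then clamp the accumulator. */
static unsigned long sleeper_bonus_old(unsigned long bonus, unsigned long delta_fair)
{
	bonus += delta_fair;
	if (bonus > runtime_limit)
		bonus = runtime_limit;
	return bonus;
}

/* New behaviour: once the accumulator is over the limit, skip the boost
 * entirely (in the kernel this is the early return added before the
 * sleeper's wait_runtime is credited at all). */
static unsigned long sleeper_bonus_new(unsigned long bonus, unsigned long delta_fair)
{
	if (bonus > runtime_limit)
		return bonus;
	return bonus + delta_fair;
}

int main(void)
{
	/* Accumulator already over the limit: the old code still granted the
	 * boost and merely clamped the book-keeping; the new code skips it. */
	printf("old: %lu ns\n", sleeper_bonus_old(55000000UL, 5000000UL));
	printf("new: %lu ns\n", sleeper_bonus_new(55000000UL, 5000000UL));
	return 0;
}

The task_new_fair() hunk is a related fix ("sched: fix startup penalty calculation"):
sysctl_sched_granularity is an unsigned int, so without the (long) cast the negated
start debit wraps to a large positive value and the new task starts with a bonus
rather than the intended -granularity/2 penalty in its signed wait_runtime.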