author | Mike Galbraith <efault@gmx.de> | 2010-03-11 11:15:38 -0500
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2010-03-11 12:32:50 -0500
commit | b42e0c41a422a212ddea0666d5a3a0e3c35206db
tree | 443cf5918548cab86c3f9f3f34a1b700d809070b /kernel/sched_fair.c
parent | 39c0cbe2150cbd848a25ba6cdb271d1ad46818ad
sched: Remove avg_wakeup
Testing the load which led to this heuristic (nfs4 kbuild) shows that it has
outlived its usefulness. With intervening load balancing changes, I cannot
see any difference with/without, so recover those fastpath cycles.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301062.6785.29.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r-- | kernel/sched_fair.c | 31
1 file changed, 0 insertions, 31 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8ad164bbdac1..6fc62854422c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1592,42 +1592,11 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 }
 #endif /* CONFIG_SMP */
 
-/*
- * Adaptive granularity
- *
- * se->avg_wakeup gives the average time a task runs until it does a wakeup,
- * with the limit of wakeup_gran -- when it never does a wakeup.
- *
- * So the smaller avg_wakeup is the faster we want this task to preempt,
- * but we don't want to treat the preemptee unfairly and therefore allow it
- * to run for at least the amount of time we'd like to run.
- *
- * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
- *
- * NOTE: we use *nr_running to scale with load, this nicely matches the
- * degrading latency on load.
- */
-static unsigned long
-adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
-{
-	u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
-	u64 gran = 0;
-
-	if (this_run < expected_wakeup)
-		gran = expected_wakeup - this_run;
-
-	return min_t(s64, gran, sysctl_sched_wakeup_granularity);
-}
-
 static unsigned long
 wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 {
 	unsigned long gran = sysctl_sched_wakeup_granularity;
 
-	if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
-		gran = adaptive_gran(curr, se);
-
 	/*
 	 * Since its curr running now, convert the gran from real-time
 	 * to virtual-time in his units.
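
For readers who want to see the removed heuristic in isolation: the deleted adaptive_gran() shrank the wakeup preemption granularity for tasks that historically ran only briefly before waking another task (small se->avg_wakeup), scaled that threshold by runqueue load, and clamped the result to sysctl_sched_wakeup_granularity. Below is a minimal standalone sketch of that arithmetic in plain C; the names adaptive_gran_sketch and wakeup_granularity_ns, and the sample values in main(), are illustrative stand-ins, not kernel API.

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative stand-in for sysctl_sched_wakeup_granularity, in ns. */
	static const uint64_t wakeup_granularity_ns = 1000000; /* 1 ms */

	/*
	 * Sketch of the removed adaptive_gran() arithmetic: the shorter a
	 * task's average run-until-wakeup, the smaller the granularity it
	 * must overcome to preempt, scaled by the number of runnable tasks.
	 */
	static uint64_t adaptive_gran_sketch(uint64_t this_run_ns,
					     uint64_t avg_wakeup_ns,
					     unsigned int nr_running)
	{
		uint64_t expected_wakeup = 2 * avg_wakeup_ns * nr_running;
		uint64_t gran = 0;

		if (this_run_ns < expected_wakeup)
			gran = expected_wakeup - this_run_ns;

		/* min_t(s64, ...) in the original: clamp to the sysctl. */
		if (gran > wakeup_granularity_ns)
			gran = wakeup_granularity_ns;
		return gran;
	}

	int main(void)
	{
		/* A task that usually wakes others after ~100 us, on a
		 * runqueue with 4 runnable tasks, 300 us into its slice. */
		printf("gran = %llu ns\n", (unsigned long long)
		       adaptive_gran_sketch(300000, 100000, 4));
		return 0;
	}

With these sample numbers, expected_wakeup = 2 * 100000 * 4 = 800000 ns, so the current task still faces a 500000 ns granularity, under the 1 ms clamp; a task with a large avg_wakeup simply hits the clamp, reproducing the non-adaptive behaviour the patch falls back to.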