author		Mike Galbraith <efault@gmx.de>	2010-03-11 11:15:38 -0500
committer	Ingo Molnar <mingo@elte.hu>	2010-03-11 12:32:50 -0500
commit		b42e0c41a422a212ddea0666d5a3a0e3c35206db (patch)
tree		443cf5918548cab86c3f9f3f34a1b700d809070b /kernel/sched.c
parent		39c0cbe2150cbd848a25ba6cdb271d1ad46818ad (diff)
sched: Remove avg_wakeup
Testing the load which led to this heuristic (nfs4 kbuild) shows that it has
outlived its usefulness. With intervening load balancing changes, I cannot
see any difference with/without, so recover those fastpath cycles.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1268301062.6785.29.camel@marge.simson.net>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	26
1 file changed, 4 insertions(+), 22 deletions(-)
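For context before the hunks: the first hunk's header names update_avg(), the running-average helper that the surviving avg_overlap accounting keeps calling. Below is a minimal standalone sketch of it, with the kernel's u64/s64 typedefs mapped onto stdint types; the 1/8 sample weight matches the in-tree helper at this point in kernel/sched.c, but treat the block as an illustration, not a copy of kernel code.

#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

/* Running average with 1/8 sample weight: avg += (sample - avg) / 8. */
static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;	/* may be negative; wraps via the s64 cast */

	*avg += diff >> 3;
}

Each sample nudges the average one eighth of the way toward the new value, so an average of 0 hit with an 800000 ns sample becomes 100000 ns.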
diff --git a/kernel/sched.c b/kernel/sched.c
index 60b1bbe2ad1b..35a8626ace7d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1880,9 +1880,6 @@ static void update_avg(u64 *avg, u64 sample)
 static void
 enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
-	if (wakeup)
-		p->se.start_runtime = p->se.sum_exec_runtime;
-
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, wakeup, head);
 	p->se.on_rq = 1;
@@ -1890,17 +1887,11 @@ enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (sleep) {
-		if (p->se.last_wakeup) {
-			update_avg(&p->se.avg_overlap,
-				p->se.sum_exec_runtime - p->se.last_wakeup);
-			p->se.last_wakeup = 0;
-		} else {
-			update_avg(&p->se.avg_wakeup,
-					sysctl_sched_wakeup_granularity);
-		}
+	if (sleep && p->se.last_wakeup) {
+		update_avg(&p->se.avg_overlap,
+			   p->se.sum_exec_runtime - p->se.last_wakeup);
+		p->se.last_wakeup = 0;
 	}
-
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, sleep);
 	p->se.on_rq = 0;
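To make the surviving branch concrete: last_wakeup is stamped with sum_exec_runtime in try_to_wake_up() (third hunk, below), so the sample fed to avg_overlap is the CPU time the task burned between waking another task and being dequeued itself. A self-contained demo sketch with invented values; the field names mirror sched_entity, but this is ordinary userspace C, not kernel code.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

/* Same 1/8-weight running average as update_avg() above. */
static void update_avg(u64 *avg, u64 sample)
{
	s64 diff = sample - *avg;

	*avg += diff >> 3;
}

int main(void)
{
	/* Hypothetical sched_entity fields, in nanoseconds. */
	u64 sum_exec_runtime = 5000000;	/* total CPU time at dequeue       */
	u64 last_wakeup      = 4200000;	/* stamp taken when it woke a task */
	u64 avg_overlap      = 0;

	/* The dequeue path after this patch: only overlap accounting remains. */
	if (last_wakeup) {
		update_avg(&avg_overlap, sum_exec_runtime - last_wakeup);
		last_wakeup = 0;
	}

	printf("avg_overlap = %llu ns\n", (unsigned long long)avg_overlap);
	return 0;
}

This prints avg_overlap = 100000 ns: the 800000 ns overlap sample, weighted by 1/8.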
@@ -2466,13 +2457,6 @@ out_activate:
 	 */
 	if (!in_interrupt()) {
 		struct sched_entity *se = &current->se;
-		u64 sample = se->sum_exec_runtime;
-
-		if (se->last_wakeup)
-			sample -= se->last_wakeup;
-		else
-			sample -= se->start_runtime;
-		update_avg(&se->avg_wakeup, sample);
 
 		se->last_wakeup = se->sum_exec_runtime;
 	}
@@ -2540,8 +2524,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.nr_migrations		= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
-	p->se.start_runtime		= 0;
-	p->se.avg_wakeup		= sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));