author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-11-30 07:00:37 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-12-09 04:03:07 -0500
commit		6cecd084d0fd27bb1e498e2829fd45846d806856 (patch)
tree		90cc079c942ad35669d1a33957a121c1cb3a88a6
parent		3a7e73a2e26fffdbc46ba95fc0425418984f5140 (diff)
sched: Discard some old bits
WAKEUP_RUNNING was an experiment, not sure why that ever ended up being
merged...

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/sched.h		2
-rw-r--r--	kernel/sched.c			17
-rw-r--r--	kernel/sched_debug.c		1
-rw-r--r--	kernel/sched_fair.c		3
-rw-r--r--	kernel/sched_features.h		5
5 files changed, 7 insertions, 21 deletions
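Note on the hunks below: both the removed avg_running bookkeeping and the
surviving avg_overlap update go through the update_avg() helper in
kernel/sched.c. As a rough sketch (the exact 1/8 weighting is recalled from
kernels of this era and should be treated as an assumption, not part of this
patch), it maintains a simple exponential moving average:

	/* sketch of the update_avg() helper referenced by the hunks below */
	static void update_avg(u64 *avg, u64 sample)
	{
		s64 diff = sample - *avg;	/* signed distance from the current average */

		*avg += diff >> 3;		/* move 1/8 of the way toward the new sample */
	}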
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 31d9dec78675..4b1ebd3280c6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1152,8 +1152,6 @@ struct sched_entity {
 	u64			start_runtime;
 	u64			avg_wakeup;
 
-	u64			avg_running;
-
 #ifdef CONFIG_SCHEDSTATS
 	u64			wait_start;
 	u64			wait_max;
diff --git a/kernel/sched.c b/kernel/sched.c
index 33c903573132..0170735bdafc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2493,7 +2493,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.avg_overlap		= 0;
 	p->se.start_runtime		= 0;
 	p->se.avg_wakeup		= sysctl_sched_wakeup_granularity;
-	p->se.avg_running		= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start		= 0;
@@ -5379,13 +5378,14 @@ static inline void schedule_debug(struct task_struct *prev)
 #endif
 }
 
-static void put_prev_task(struct rq *rq, struct task_struct *p)
+static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	u64 runtime = p->se.sum_exec_runtime - p->se.prev_sum_exec_runtime;
+	if (prev->state == TASK_RUNNING) {
+		u64 runtime = prev->se.sum_exec_runtime;
 
-	update_avg(&p->se.avg_running, runtime);
+		runtime -= prev->se.prev_sum_exec_runtime;
+		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
 
-	if (p->state == TASK_RUNNING) {
 		/*
 		 * In order to avoid avg_overlap growing stale when we are
 		 * indeed overlapping and hence not getting put to sleep, grow
@@ -5395,12 +5395,9 @@ static void put_prev_task(struct rq *rq, struct task_struct *p)
 		 * correlates to the amount of cache footprint a task can
 		 * build up.
 		 */
-		runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
-		update_avg(&p->se.avg_overlap, runtime);
-	} else {
-		update_avg(&p->se.avg_running, 0);
+		update_avg(&prev->se.avg_overlap, runtime);
 	}
-	p->sched_class->put_prev_task(rq, p);
+	prev->sched_class->put_prev_task(rq, prev);
 }
 
 /*
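For readability, here is the resulting put_prev_task() after this patch,
stitched together from the two hunks above (the middle of the in-code comment
falls between the hunks and is abridged here):

	static void put_prev_task(struct rq *rq, struct task_struct *prev)
	{
		if (prev->state == TASK_RUNNING) {
			u64 runtime = prev->se.sum_exec_runtime;

			runtime -= prev->se.prev_sum_exec_runtime;
			runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);

			/*
			 * Keep avg_overlap from growing stale while the task
			 * keeps running (comment abridged; see hunks above).
			 */
			update_avg(&prev->se.avg_overlap, runtime);
		}
		prev->sched_class->put_prev_task(rq, prev);
	}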
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 6988cf08f705..5fda66615fee 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -399,7 +399,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.sum_exec_runtime);
 	PN(se.avg_overlap);
 	PN(se.avg_wakeup);
-	PN(se.avg_running);
 
 	nr_switches = p->nvcsw + p->nivcsw;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 76b5792c4198..e9f5daee12c7 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1689,9 +1689,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 			pse->avg_overlap < sysctl_sched_migration_cost)
 		goto preempt;
 
-	if (sched_feat(WAKEUP_RUNNING) && pse->avg_running < se->avg_running)
-		goto preempt;
-
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 0d94083582c7..d5059fd761d9 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -54,11 +54,6 @@ SCHED_FEAT(WAKEUP_SYNC, 0)
 SCHED_FEAT(WAKEUP_OVERLAP, 0)
 
 /*
- * Wakeup preemption towards tasks that run short
- */
-SCHED_FEAT(WAKEUP_RUNNING, 0)
-
-/*
  * Use the SYNC wakeup hint, pipes and the likes use this to indicate
  * the remote end is likely to consume the data we just wrote, and
  * therefore has cache benefit from being placed on the same cpu, see