author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-01-14 06:39:18 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-15 06:00:08 -0500
commit	831451ac4e44d3a20b581ce726ef1d1144373f7d (patch)
tree	0ef28221132dc7cd30a2660bb4193fb86f3bb021 /kernel
parent	a6525042bfdfcab128bd91fad264de10fd24a55e (diff)
sched: introduce avg_wakeup
Introduce a new avg_wakeup statistic. avg_wakeup is a measure of how frequently a task wakes up other tasks: it represents the average time between wakeups, capped at avg_runtime for the case where the task does not wake up anybody.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
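As an illustration of the statistic (not part of the commit), here is a minimal user-space sketch of the kind of low-pass running average that update_avg() maintains for avg_wakeup; the 1/8 sample weighting and all numeric values are assumptions made for this example.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative running average in the style of sched.c's update_avg();
 * the >> 3 weighting (each new sample contributes 1/8) is an assumption,
 * since the helper's body lies outside the hunks shown in this commit.
 */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);

	*avg += (uint64_t)(diff >> 3);
}

int main(void)
{
	/* Hypothetical starting value: 10 ms, standing in for sysctl_sched_wakeup_granularity. */
	uint64_t avg_wakeup = 10000000ULL;
	/* Hypothetical intervals (in ns) between wakeups performed by the task. */
	uint64_t samples[] = { 2000000ULL, 1500000ULL, 1800000ULL };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		update_avg(&avg_wakeup, samples[i]);
		printf("avg_wakeup = %llu ns\n", (unsigned long long)avg_wakeup);
	}
	return 0;
}

Each sample only moves the average by an eighth of the difference, so avg_wakeup drifts toward the task's typical wakeup interval over a number of wakeups rather than jumping on the first sample.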
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	36
-rw-r--r--	kernel/sched_debug.c	1
2 files changed, 31 insertions, 6 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 8be2c13b50d0..86f5a063f0b9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1705,6 +1705,9 @@ static void update_avg(u64 *avg, u64 sample)
 
 static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
+	if (wakeup)
+		p->se.start_runtime = p->se.sum_exec_runtime;
+
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, wakeup);
 	p->se.on_rq = 1;
@@ -1712,10 +1715,15 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
 {
-	if (sleep && p->se.last_wakeup) {
-		update_avg(&p->se.avg_overlap,
-			   p->se.sum_exec_runtime - p->se.last_wakeup);
-		p->se.last_wakeup = 0;
+	if (sleep) {
+		if (p->se.last_wakeup) {
+			update_avg(&p->se.avg_overlap,
+				p->se.sum_exec_runtime - p->se.last_wakeup);
+			p->se.last_wakeup = 0;
+		} else {
+			update_avg(&p->se.avg_wakeup,
+				sysctl_sched_wakeup_granularity);
+		}
 	}
 
 	sched_info_dequeued(p);
@@ -2345,6 +2353,22 @@ out_activate:
 	activate_task(rq, p, 1);
 	success = 1;
 
+	/*
+	 * Only attribute actual wakeups done by this task.
+	 */
+	if (!in_interrupt()) {
+		struct sched_entity *se = &current->se;
+		u64 sample = se->sum_exec_runtime;
+
+		if (se->last_wakeup)
+			sample -= se->last_wakeup;
+		else
+			sample -= se->start_runtime;
+		update_avg(&se->avg_wakeup, sample);
+
+		se->last_wakeup = se->sum_exec_runtime;
+	}
+
 out_running:
 	trace_sched_wakeup(rq, p, success);
 	check_preempt_curr(rq, p, sync);
@@ -2355,8 +2379,6 @@ out_running:
 		p->sched_class->task_wake_up(rq, p);
 #endif
 out:
-	current->se.last_wakeup = current->se.sum_exec_runtime;
-
 	task_rq_unlock(rq, &flags);
 
 	return success;
@@ -2386,6 +2408,8 @@ static void __sched_fork(struct task_struct *p)
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
+	p->se.start_runtime		= 0;
+	p->se.avg_wakeup		= sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start		= 0;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 16eeba4e4169..2b1260f0e800 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -397,6 +397,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	PN(se.vruntime);
 	PN(se.sum_exec_runtime);
 	PN(se.avg_overlap);
+	PN(se.avg_wakeup);
 
 	nr_switches = p->nvcsw + p->nivcsw;
 
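Taken together, the fields touched by this patch form a small state machine in the waking task: start_runtime snapshots the runtime clock at enqueue time, last_wakeup snapshots it at the previous wakeup of another task, and avg_wakeup averages the difference. The following self-contained user-space model is only a sketch of that sampling logic as seen in try_to_wake_up() and enqueue_task() above; the struct, the trace values, and the 1/8 weighting are assumptions, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the scheduler-entity fields touched by the patch. */
struct se_model {
	uint64_t sum_exec_runtime;	/* total CPU time consumed so far */
	uint64_t start_runtime;		/* snapshot taken when the task was enqueued */
	uint64_t last_wakeup;		/* snapshot taken at its previous wakeup of another task */
	uint64_t avg_wakeup;		/* running average of wakeup intervals */
};

static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);	/* assumed 1/8 low-pass, as in the sketch above */

	*avg += (uint64_t)(diff >> 3);
}

/* Mirrors the enqueue_task() hunk: remember where the runtime clock stood at wakeup. */
static void model_enqueue(struct se_model *se)
{
	se->start_runtime = se->sum_exec_runtime;
}

/* Mirrors the !in_interrupt() block in try_to_wake_up(): the waker accounts one sample. */
static void model_wake_other(struct se_model *se)
{
	uint64_t sample = se->sum_exec_runtime;

	if (se->last_wakeup)
		sample -= se->last_wakeup;	/* time since its previous wakeup */
	else
		sample -= se->start_runtime;	/* first wakeup since being enqueued */

	update_avg(&se->avg_wakeup, sample);
	se->last_wakeup = se->sum_exec_runtime;
}

int main(void)
{
	/* Hypothetical trace: the task runs 3 ms, wakes someone, runs 1 ms more, wakes again. */
	struct se_model se = { .avg_wakeup = 10000000ULL };	/* assumed initial value */

	model_enqueue(&se);
	se.sum_exec_runtime += 3000000ULL;
	model_wake_other(&se);			/* sample measured against start_runtime: 3 ms */
	se.sum_exec_runtime += 1000000ULL;
	model_wake_other(&se);			/* sample measured against last_wakeup: 1 ms */

	printf("avg_wakeup = %llu ns\n", (unsigned long long)se.avg_wakeup);
	return 0;
}

With the hypothetical trace above, the first wakeup is measured against start_runtime and the second against last_wakeup, pulling avg_wakeup down from its assumed 10 ms starting point toward the task's actual wakeup cadence.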