author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2008-06-27 07:41:35 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-06-27 08:31:45 -0400
commit		2398f2c6d34b43025f274fc42eaca34d23ec2320 (patch)
tree		0ff3e9edf12c6b4485e4fa94f47a79b44d75376a /kernel/sched.c
parent		cd80917e4ff465ea77106f8e4fb631eedc4cf426 (diff)
sched: update shares on wakeup
We found that the affine wakeup code needs rather accurate load figures
to be effective. The trouble is that updating the load figures is fairly
expensive with group scheduling. Therefore ratelimit the updating.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
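The core of the change is a simple time-based rate limit: remember when the
group shares were last recomputed and skip the (expensive) tree walk unless at
least sysctl_sched_shares_ratelimit nanoseconds have elapsed. Below is a
minimal, self-contained userspace sketch of that pattern, not kernel code: the
names RATELIMIT_NS, last_update_ns, maybe_update() and do_expensive_update()
are illustrative stand-ins, and clock_gettime() stands in for the kernel's
cpu_clock().

/*
 * Illustrative sketch of the rate-limit pattern used by update_shares()
 * in the patch below: do the expensive work at most once per RATELIMIT_NS.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define RATELIMIT_NS	500000ULL	/* 0.5ms, the patch's default */

static uint64_t last_update_ns;

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void do_expensive_update(void)
{
	/* stands in for walk_tg_tree(tg_nop, tg_shares_up, 0, sd) */
	puts("updating shares");
}

static void maybe_update(void)
{
	uint64_t now = now_ns();
	int64_t elapsed = (int64_t)(now - last_update_ns);

	if (elapsed >= (int64_t)RATELIMIT_NS) {
		last_update_ns = now;
		do_expensive_update();
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		maybe_update();		/* only the first call does the work */
	return 0;
}

In the patch itself the same check lives in update_shares(), with the
threshold supplied by the new sysctl_sched_shares_ratelimit knob
(500000 ns, i.e. 0.5ms, by default).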
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	30
1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1cff969f6646..62db0891025a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -778,6 +778,12 @@ late_initcall(sched_init_debug);
 const_debug unsigned int sysctl_sched_nr_migrate = 32;
 
 /*
+ * ratelimit for updating the group shares.
+ * default: 0.5ms
+ */
+const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
+
+/*
  * period over which we measure -rt task cpu usage in us.
  * default: 1s
  */
@@ -1590,7 +1596,13 @@ tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
 
 static void update_shares(struct sched_domain *sd)
 {
-	walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
+	u64 now = cpu_clock(raw_smp_processor_id());
+	s64 elapsed = now - sd->last_update;
+
+	if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
+		sd->last_update = now;
+		walk_tg_tree(tg_nop, tg_shares_up, 0, sd);
+	}
 }
 
 static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
@@ -2199,6 +2211,22 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	if (!sched_feat(SYNC_WAKEUPS))
 		sync = 0;
 
+#ifdef CONFIG_SMP
+	if (sched_feat(LB_WAKEUP_UPDATE)) {
+		struct sched_domain *sd;
+
+		this_cpu = raw_smp_processor_id();
+		cpu = task_cpu(p);
+
+		for_each_domain(this_cpu, sd) {
+			if (cpu_isset(cpu, sd->span)) {
+				update_shares(sd);
+				break;
+			}
+		}
+	}
+#endif
+
 	smp_wmb();
 	rq = task_rq_lock(p, &flags);
 	old_state = p->state;