author    Ingo Molnar <mingo@elte.hu>    2007-08-09 05:16:45 -0400
committer Ingo Molnar <mingo@elte.hu>    2007-08-09 05:16:45 -0400
commit    0915c4e89d311948b67cdd4c183a2efbcafcc9f9 (patch)
tree      fc6175a3d735f738a113be089a6e534e3a351b11 /kernel/sched_fair.c
parent    4a2a4df7b6db25df8f3d5cc6dd0b096119359d92 (diff)
sched: batch sleeper bonus
Batch up the sleeper bonus sum a bit more. Anything below sched-granularity is too small to make a practical difference anyway. This optimization reduces the math in high-frequency scheduling scenarios.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
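To illustrate the batching idea described in the commit message, here is a minimal user-space C sketch. It is not the kernel code: the names GRANULARITY_NS, fake_cfs_rq, add_sleeper_bonus and maybe_apply_bonus are hypothetical stand-ins, and the "expensive" step is reduced to a single addition. The point it shows is that the per-wakeup path only accumulates, and the heavier math runs once the batched sum exceeds the granularity threshold.

#include <stdio.h>

/* Hypothetical stand-in for sysctl_sched_granularity (10 ms, illustrative). */
#define GRANULARITY_NS 10000000ULL

struct fake_cfs_rq {
	unsigned long long sleeper_bonus;   /* accumulated, not yet applied */
	unsigned long long fair_clock;      /* stand-in for the fair clock */
};

/* Called on every wakeup: just accumulate, no scaling or division here. */
static void add_sleeper_bonus(struct fake_cfs_rq *rq, unsigned long long b)
{
	rq->sleeper_bonus += b;
}

/* Called from the hot update path: only do the heavier work once the
 * batched sum is large enough to matter. */
static void maybe_apply_bonus(struct fake_cfs_rq *rq)
{
	if (rq->sleeper_bonus <= GRANULARITY_NS)
		return;                          /* too small, keep batching */
	rq->fair_clock += rq->sleeper_bonus;     /* simplified "expensive" step */
	rq->sleeper_bonus = 0;
}

int main(void)
{
	struct fake_cfs_rq rq = { 0, 0 };
	int i;

	for (i = 0; i < 5; i++) {
		add_sleeper_bonus(&rq, 3000000ULL);   /* 3 ms per wakeup */
		maybe_apply_bonus(&rq);
		printf("pending=%llu fair_clock=%llu\n",
		       rq.sleeper_bonus, rq.fair_clock);
	}
	return 0;
}

Running this shows the pending bonus growing across several wakeups and being folded into the fair clock only once it crosses the threshold, which is the cost-saving the commit is after.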
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--  kernel/sched_fair.c  |  2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 6f579ff5a9bc..9f401588d509 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -300,7 +300,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr, u64 now)
 	delta_fair = calc_delta_fair(delta_exec, lw);
 	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
 
-	if (cfs_rq->sleeper_bonus > sysctl_sched_stat_granularity) {
+	if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) {
 		delta = calc_delta_mine(cfs_rq->sleeper_bonus,
 				curr->load.weight, lw);
 		if (unlikely(delta > cfs_rq->sleeper_bonus))
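For context on what the guarded block computes, the calc_delta_mine() call scales the batched bonus by the current entity's share of the runqueue load, which is the math the threshold check lets the hot path skip when the sum is still small. Below is a rough user-space sketch of that kind of weight-proportional scaling; it is an assumption-laden simplification (a plain multiply/divide with a hypothetical scale_delta helper), not the kernel implementation, which uses precomputed inverse weights and shifts.

#include <stdio.h>

/* Simplified weight-proportional scaling in the spirit of calc_delta_mine():
 * scale a time delta by this entity's weight relative to the total load. */
static unsigned long long scale_delta(unsigned long long delta,
				      unsigned long weight,
				      unsigned long total_weight)
{
	return delta * weight / total_weight;
}

int main(void)
{
	/* A nice-0 task (weight 1024) on a runqueue with total weight 3072
	 * receives one third of the batched sleeper bonus. */
	unsigned long long bonus = 12000000ULL;   /* 12 ms, illustrative */

	printf("scaled bonus = %llu ns\n", scale_delta(bonus, 1024, 3072));
	return 0;
}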