author     Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:06 -0400
committer  Ingo Molnar <mingo@elte.hu>	2007-10-15 11:00:06 -0400
commit     e22f5bbf86d8cce710d5c8ba5bf57832e73aab8c (patch)
tree       9e6240455f123da6249fe0a88ba51459488f2e87 /kernel/sched_fair.c
parent     495eca494aa6006df55e3a04e105462c5940ca17 (diff)
sched: remove wait_runtime limit
Remove the wait_runtime-limit fields and the code that depends on them, now
that the math has been changed over to rely on the vruntime metric.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
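
For context only (not part of the commit): a minimal user-space sketch of the vruntime idea the message refers to. Everything below -- NICE_0_LOAD, struct entity, update_vruntime() and the nice +5 weight -- is an illustrative assumption, not the kernel's actual code; it only shows why weight-scaled virtual runtime removes the need for an explicit wait_runtime clamp.

/*
 * Illustrative sketch (plain C, not kernel code): each entity accumulates
 * virtual runtime scaled inversely by its load weight; fairness follows
 * from always running the entity with the smallest vruntime, so no
 * per-entity clamp like limit_wait_runtime() is required.
 */
#include <stdio.h>

#define NICE_0_LOAD 1024ULL	/* illustrative weight of a nice-0 task */

struct entity {
	const char *name;
	unsigned long long weight;	/* load weight; NICE_0_LOAD for nice 0 */
	unsigned long long vruntime;	/* weighted virtual runtime, in ns */
};

/* Charge delta_exec nanoseconds of real CPU time to an entity. */
static void update_vruntime(struct entity *se, unsigned long long delta_exec)
{
	/* heavier (higher-weight) entities age their vruntime more slowly */
	se->vruntime += delta_exec * NICE_0_LOAD / se->weight;
}

int main(void)
{
	struct entity a = { "nice 0",  NICE_0_LOAD, 0 };
	struct entity b = { "nice +5", 335,         0 };	/* approximate weight */

	/* run each entity for the same 10 ms of wall-clock CPU time */
	update_vruntime(&a, 10000000ULL);
	update_vruntime(&b, 10000000ULL);

	/* the lower-weight entity's vruntime advances faster */
	printf("%s: vruntime=%llu\n", a.name, a.vruntime);
	printf("%s: vruntime=%llu\n", b.name, b.vruntime);
	return 0;
}

Because vruntime only ever moves forward in proportion to weight, there is no accumulating per-entity credit that could overrun, which is what limit_wait_runtime() used to clamp.
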
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	97
1 files changed, 5 insertions, 92 deletions
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 72f202a8be96..a94189c42d1a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -249,41 +249,11 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return period;
 }
 
-static inline void
-limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	long limit = sysctl_sched_runtime_limit;
-
-	/*
-	 * Niced tasks have the same history dynamic range as
-	 * non-niced tasks:
-	 */
-	if (unlikely(se->wait_runtime > limit)) {
-		se->wait_runtime = limit;
-		schedstat_inc(se, wait_runtime_overruns);
-		schedstat_inc(cfs_rq, wait_runtime_overruns);
-	}
-	if (unlikely(se->wait_runtime < -limit)) {
-		se->wait_runtime = -limit;
-		schedstat_inc(se, wait_runtime_underruns);
-		schedstat_inc(cfs_rq, wait_runtime_underruns);
-	}
-}
-
-static inline void
-__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
-{
-	se->wait_runtime += delta;
-	schedstat_add(se, sum_wait_runtime, delta);
-	limit_wait_runtime(cfs_rq, se);
-}
-
 static void
 add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
 {
-	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
-	__add_wait_runtime(cfs_rq, se, delta);
-	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+	se->wait_runtime += delta;
+	schedstat_add(cfs_rq, wait_runtime, delta);
 }
 
 /*
@@ -294,7 +264,7 @@ static inline void
 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	      unsigned long delta_exec)
 {
-	unsigned long delta, delta_fair, delta_mine, delta_exec_weighted;
+	unsigned long delta_fair, delta_mine, delta_exec_weighted;
 	struct load_weight *lw = &cfs_rq->load;
 	unsigned long load = lw->weight;
 
@@ -318,14 +288,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	delta_fair = calc_delta_fair(delta_exec, lw);
 	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
 
-	if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
-		delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
-		delta = min(delta, (unsigned long)(
-			(long)sysctl_sched_runtime_limit - curr->wait_runtime));
-		cfs_rq->sleeper_bonus -= delta;
-		delta_mine -= delta;
-	}
-
 	cfs_rq->fair_clock += delta_fair;
 	/*
 	 * We executed delta_exec amount of time on the CPU,
@@ -461,58 +423,8 @@ update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Scheduling class queueing methods:
  */
 
-static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
-			      unsigned long delta_fair)
-{
-	unsigned long load = cfs_rq->load.weight;
-	long prev_runtime;
-
-	/*
-	 * Do not boost sleepers if there's too much bonus 'in flight'
-	 * already:
-	 */
-	if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
-		return;
-
-	if (sched_feat(SLEEPER_LOAD_AVG))
-		load = rq_of(cfs_rq)->cpu_load[2];
-
-	/*
-	 * Fix up delta_fair with the effect of us running
-	 * during the whole sleep period:
-	 */
-	if (sched_feat(SLEEPER_AVG))
-		delta_fair = div64_likely32((u64)delta_fair * load,
-				load + se->load.weight);
-
-	delta_fair = calc_weighted(delta_fair, se);
-
-	prev_runtime = se->wait_runtime;
-	__add_wait_runtime(cfs_rq, se, delta_fair);
-	delta_fair = se->wait_runtime - prev_runtime;
-
-	/*
-	 * Track the amount of bonus we've given to sleepers:
-	 */
-	cfs_rq->sleeper_bonus += delta_fair;
-}
-
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	struct task_struct *tsk = task_of(se);
-	unsigned long delta_fair;
-
-	if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
-			!sched_feat(FAIR_SLEEPERS))
-		return;
-
-	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
-		(u64)(cfs_rq->fair_clock - se->sleep_start_fair));
-
-	__enqueue_sleeper(cfs_rq, se, delta_fair);
-
-	se->sleep_start_fair = 0;
-
 #ifdef CONFIG_SCHEDSTATS
 	if (se->sleep_start) {
 		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
@@ -544,6 +456,8 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * time that the task spent sleeping:
 	 */
 	if (unlikely(prof_on == SLEEP_PROFILING)) {
+		struct task_struct *tsk = task_of(se);
+
 		profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
 			     delta >> 20);
 	}
@@ -604,7 +518,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
 	update_stats_dequeue(cfs_rq, se);
 	if (sleep) {
-		se->sleep_start_fair = cfs_rq->fair_clock;
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);