author     Ingo Molnar <mingo@elte.hu>   2007-10-15 11:00:06 -0400
committer  Ingo Molnar <mingo@elte.hu>   2007-10-15 11:00:06 -0400
commit     e22f5bbf86d8cce710d5c8ba5bf57832e73aab8c (patch)
tree       9e6240455f123da6249fe0a88ba51459488f2e87 /kernel
parent     495eca494aa6006df55e3a04e105462c5940ca17 (diff)
sched: remove wait_runtime limit
remove the wait_runtime-limit fields and the code depending on them, now
that the math has been changed over to rely on the vruntime metric.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
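
For context before the diff: the removed logic clamped each task's accumulated
wait_runtime to +/- sysctl_sched_runtime_limit and counted every clamp as an
overrun or underrun (see the deleted limit_wait_runtime() in kernel/sched_fair.c
below). A minimal user-space sketch of that behaviour, using a made-up limit
value rather than the kernel's default:

/*
 * Sketch of the clamp this patch removes, distilled from the deleted
 * limit_wait_runtime(). The limit value here is a hypothetical example,
 * not the kernel's default for sysctl_sched_runtime_limit.
 */
#include <stdio.h>

static const long sched_runtime_limit = 40000000;	/* e.g. 40 ms, in ns */

static long clamp_wait_runtime(long wait_runtime)
{
	if (wait_runtime > sched_runtime_limit)
		return sched_runtime_limit;	/* counted as an overrun */
	if (wait_runtime < -sched_runtime_limit)
		return -sched_runtime_limit;	/* counted as an underrun */
	return wait_runtime;			/* within the allowed range */
}

int main(void)
{
	/* A task that accumulated far more wait credit than the limit: */
	printf("%ld\n", clamp_wait_runtime(90000000));	/* prints 40000000 */
	return 0;
}

With the math keyed to vruntime, this clamp and its overrun/underrun
bookkeeping become unnecessary, which is what the diff below deletes.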
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/sched.c        18
 -rw-r--r--  kernel/sched_debug.c   2
 -rw-r--r--  kernel/sched_fair.c   97
 -rw-r--r--  kernel/sysctl.c       11
4 files changed, 5 insertions, 123 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 3a4ac0b75f2d..21cc3b2be023 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -180,7 +180,6 @@ struct cfs_rq {
 	u64 exec_clock;
 	u64 min_vruntime;
 	s64 wait_runtime;
-	u64 sleeper_bonus;
 	unsigned long wait_runtime_overruns, wait_runtime_underruns;
 
 	struct rb_root tasks_timeline;
@@ -673,19 +672,6 @@ static inline void resched_task(struct task_struct *p)
 }
 #endif
 
-static u64 div64_likely32(u64 divident, unsigned long divisor)
-{
-#if BITS_PER_LONG == 32
-	if (likely(divident <= 0xffffffffULL))
-		return (u32)divident / divisor;
-	do_div(divident, divisor);
-
-	return divident;
-#else
-	return divident / divisor;
-#endif
-}
-
 #if BITS_PER_LONG == 32
 # define WMULT_CONST	(~0UL)
 #else
@@ -1016,8 +1002,6 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 	if (p->se.wait_start_fair)
 		p->se.wait_start_fair -= fair_clock_offset;
-	if (p->se.sleep_start_fair)
-		p->se.sleep_start_fair -= fair_clock_offset;
 
 #ifdef CONFIG_SCHEDSTATS
 	if (p->se.wait_start)
@@ -1592,7 +1576,6 @@ static void __sched_fork(struct task_struct *p)
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
 	p->se.wait_runtime = 0;
-	p->se.sleep_start_fair = 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start = 0;
@@ -6582,7 +6565,6 @@ void normalize_rt_tasks(void)
 		p->se.wait_runtime = 0;
 		p->se.exec_start = 0;
 		p->se.wait_start_fair = 0;
-		p->se.sleep_start_fair = 0;
 #ifdef CONFIG_SCHEDSTATS
 		p->se.wait_start = 0;
 		p->se.sleep_start = 0;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 62965f0ae37c..3350169a7d2a 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -148,7 +148,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	P(wait_runtime);
 	P(wait_runtime_overruns);
 	P(wait_runtime_underruns);
-	P(sleeper_bonus);
 #undef P
 
 	print_cfs_rq_runtime_sum(m, cpu, cfs_rq);
@@ -272,7 +271,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	P(se.wait_runtime);
 	P(se.wait_start_fair);
 	P(se.exec_start);
-	P(se.sleep_start_fair);
 	P(se.vruntime);
 	P(se.sum_exec_runtime);
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 72f202a8be96..a94189c42d1a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -249,41 +249,11 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	return period;
 }
 
-static inline void
-limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-	long limit = sysctl_sched_runtime_limit;
-
-	/*
-	 * Niced tasks have the same history dynamic range as
-	 * non-niced tasks:
-	 */
-	if (unlikely(se->wait_runtime > limit)) {
-		se->wait_runtime = limit;
-		schedstat_inc(se, wait_runtime_overruns);
-		schedstat_inc(cfs_rq, wait_runtime_overruns);
-	}
-	if (unlikely(se->wait_runtime < -limit)) {
-		se->wait_runtime = -limit;
-		schedstat_inc(se, wait_runtime_underruns);
-		schedstat_inc(cfs_rq, wait_runtime_underruns);
-	}
-}
-
-static inline void
-__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
-{
-	se->wait_runtime += delta;
-	schedstat_add(se, sum_wait_runtime, delta);
-	limit_wait_runtime(cfs_rq, se);
-}
-
 static void
 add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
 {
-	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
-	__add_wait_runtime(cfs_rq, se, delta);
-	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
+	se->wait_runtime += delta;
+	schedstat_add(cfs_rq, wait_runtime, delta);
 }
 
 /*
@@ -294,7 +264,7 @@ static inline void
 __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	      unsigned long delta_exec)
 {
-	unsigned long delta, delta_fair, delta_mine, delta_exec_weighted;
+	unsigned long delta_fair, delta_mine, delta_exec_weighted;
 	struct load_weight *lw = &cfs_rq->load;
 	unsigned long load = lw->weight;
 
@@ -318,14 +288,6 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
 	delta_fair = calc_delta_fair(delta_exec, lw);
 	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);
 
-	if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
-		delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
-		delta = min(delta, (unsigned long)(
-			(long)sysctl_sched_runtime_limit - curr->wait_runtime));
-		cfs_rq->sleeper_bonus -= delta;
-		delta_mine -= delta;
-	}
-
 	cfs_rq->fair_clock += delta_fair;
 	/*
 	 * We executed delta_exec amount of time on the CPU,
@@ -461,58 +423,8 @@ update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * Scheduling class queueing methods:
  */
 
-static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
-			      unsigned long delta_fair)
-{
-	unsigned long load = cfs_rq->load.weight;
-	long prev_runtime;
-
-	/*
-	 * Do not boost sleepers if there's too much bonus 'in flight'
-	 * already:
-	 */
-	if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
-		return;
-
-	if (sched_feat(SLEEPER_LOAD_AVG))
-		load = rq_of(cfs_rq)->cpu_load[2];
-
-	/*
-	 * Fix up delta_fair with the effect of us running
-	 * during the whole sleep period:
-	 */
-	if (sched_feat(SLEEPER_AVG))
-		delta_fair = div64_likely32((u64)delta_fair * load,
-					    load + se->load.weight);
-
-	delta_fair = calc_weighted(delta_fair, se);
-
-	prev_runtime = se->wait_runtime;
-	__add_wait_runtime(cfs_rq, se, delta_fair);
-	delta_fair = se->wait_runtime - prev_runtime;
-
-	/*
-	 * Track the amount of bonus we've given to sleepers:
-	 */
-	cfs_rq->sleeper_bonus += delta_fair;
-}
-
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
-	struct task_struct *tsk = task_of(se);
-	unsigned long delta_fair;
-
-	if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
-			!sched_feat(FAIR_SLEEPERS))
-		return;
-
-	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
-		(u64)(cfs_rq->fair_clock - se->sleep_start_fair));
-
-	__enqueue_sleeper(cfs_rq, se, delta_fair);
-
-	se->sleep_start_fair = 0;
-
 #ifdef CONFIG_SCHEDSTATS
 	if (se->sleep_start) {
 		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
@@ -544,6 +456,8 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * time that the task spent sleeping:
 	 */
 	if (unlikely(prof_on == SLEEP_PROFILING)) {
+		struct task_struct *tsk = task_of(se);
+
 		profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
 			     delta >> 20);
 	}
@@ -604,7 +518,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
 	update_stats_dequeue(cfs_rq, se);
 	if (sleep) {
-		se->sleep_start_fair = cfs_rq->fair_clock;
 #ifdef CONFIG_SCHEDSTATS
 		if (entity_is_task(se)) {
 			struct task_struct *tsk = task_of(se);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9b1b0d4ff966..97b15c27407f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -266,17 +266,6 @@ static ctl_table kern_table[] = {
 	},
 	{
 		.ctl_name = CTL_UNNUMBERED,
-		.procname = "sched_runtime_limit_ns",
-		.data = &sysctl_sched_runtime_limit,
-		.maxlen = sizeof(unsigned int),
-		.mode = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.strategy = &sysctl_intvec,
-		.extra1 = &min_sched_granularity_ns,
-		.extra2 = &max_sched_granularity_ns,
-	},
-	{
-		.ctl_name = CTL_UNNUMBERED,
 		.procname = "sched_child_runs_first",
 		.data = &sysctl_sched_child_runs_first,
 		.maxlen = sizeof(unsigned int),
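
What survives the patch is the two-line accounting visible in the
sched_fair.c hunk above: add_wait_runtime() now just accumulates the raw
delta into both the entity and the runqueue aggregate, with no clamp and no
overrun/underrun statistics. A self-contained illustration follows; the
struct and the aggregate counter are stand-ins approximating the kernel's
sched_entity field and schedstat_add() macro, not kernel code:

/*
 * Stand-alone rendering of the simplified add_wait_runtime();
 * entity_stub and cfs_rq_wait_runtime are stand-ins for the kernel's
 * sched_entity field and per-runqueue schedstat.
 */
#include <stdio.h>

struct entity_stub {
	long wait_runtime;		/* per-entity wait credit */
};

static long cfs_rq_wait_runtime;	/* per-runqueue aggregate */

static void add_wait_runtime(struct entity_stub *se, long delta)
{
	se->wait_runtime += delta;	/* no clamp, no overrun/underrun stats */
	cfs_rq_wait_runtime += delta;	/* aggregate gets the raw delta too */
}

int main(void)
{
	struct entity_stub se = { .wait_runtime = 0 };

	add_wait_runtime(&se, 12345);
	printf("%ld %ld\n", se.wait_runtime, cfs_rq_wait_runtime);	/* 12345 12345 */
	return 0;
}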