author     Frederic Weisbecker <fweisbec@gmail.com>  2012-11-28 11:00:57 -0500
committer  Frederic Weisbecker <fweisbec@gmail.com>  2012-11-28 11:08:20 -0500
commit     fa09205783d11cc05122ad6e4ce06074624b2c0c (patch)
tree       ee13b0a24856952b06b8ad92640ef90602fd4a86
parent     d37f761dbd276790f70dcf73a287fde2c3464482 (diff)
cputime: Comment cputime's adjusting code
The reason for the scaling and monotonicity correction performed
by cputime_adjust() may not be immediately clear to the reviewer.
Add some comments to explain what happens there.
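
As a rough worked example (the numbers are hypothetical, not taken from
the patch): suppose a task was sampled for 2 user ticks and 3 system
ticks, while the scheduler accounted rtime = 4 units of precise runtime.
The scaling yields utime = 2 * 4 / 5 = 1 (integer division) and
stime = rtime - utime = 3; the monotonicity step then clamps the
reported values so they never go backward from one read to the next.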
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
 kernel/sched/cputime.c | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 220fdc4db770..b7f731768625 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -516,6 +516,10 @@ static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total)
 	return (__force cputime_t) temp;
 }
 
+/*
+ * Adjust the random precision of tick based cputime against
+ * the scheduler runtime accounting.
+ */
 static void cputime_adjust(struct task_cputime *curr,
 			   struct cputime *prev,
 			   cputime_t *ut, cputime_t *st)
@@ -524,8 +528,16 @@ static void cputime_adjust(struct task_cputime *curr,
 
 	utime = curr->utime;
 	total = utime + curr->stime;
+
 	/*
-	 * Use CFS's precise accounting:
+	 * Tick based cputime accounting depends on whether the timer
+	 * happens to interrupt the scheduling timeslices of a task.
+	 * Depending on these circumstances, the number of these interrupts
+	 * may over- or under-estimate the real user and system cputime,
+	 * matching them with only a variable precision.
+	 *
+	 * Fix this by scaling these tick based values against the total
+	 * runtime accounted by the CFS scheduler.
 	 */
 	rtime = nsecs_to_cputime(curr->sum_exec_runtime);
 
@@ -535,7 +547,9 @@ static void cputime_adjust(struct task_cputime *curr,
 		utime = rtime;
 
 	/*
-	 * Compare with previous values, to keep monotonicity:
+	 * If the tick based count grows faster than the scheduler one,
+	 * the result of the scaling may go backward.
+	 * Let's enforce monotonicity.
 	 */
 	prev->utime = max(prev->utime, utime);
 	prev->stime = max(prev->stime, rtime - prev->utime);
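
For readers who want to experiment with the adjustment outside the
kernel, here is a minimal standalone sketch of the logic the new
comments describe. It is an approximation under simplifying assumptions:
u64 stands in for cputime_t, the nsecs_to_cputime() conversion is
skipped, and scale_utime() is reduced to plain integer math instead of
the overflow-safe sequence used in kernel/sched/cputime.c.

#include <stdio.h>

typedef unsigned long long u64;

/* Simplified stand-in for the kernel's scale_utime():
 * scale the tick based utime by the ratio rtime/total. */
static u64 scale_utime(u64 utime, u64 rtime, u64 total)
{
	return utime * rtime / total;
}

int main(void)
{
	/* Hypothetical sample: 2 user ticks, 3 system ticks, but the
	 * scheduler accounted 4 units of precise runtime. */
	u64 utime = 2, stime = 3, rtime = 4;
	u64 total = utime + stime;

	/* Previously reported values, kept for the monotonicity check. */
	u64 prev_utime = 0, prev_stime = 0;

	if (total)
		utime = scale_utime(utime, rtime, total);
	else
		utime = rtime;

	/* Enforce monotonicity, as in cputime_adjust(). */
	prev_utime = utime > prev_utime ? utime : prev_utime;
	prev_stime = rtime - prev_utime > prev_stime ?
		     rtime - prev_utime : prev_stime;

	/* Prints: adjusted utime=1 stime=3 */
	printf("adjusted utime=%llu stime=%llu\n", prev_utime, prev_stime);
	return 0;
}

Compiled with a plain C compiler (e.g. gcc sketch.c), this prints
utime=1 and stime=3 for the sample above: the 5 observed ticks are
compressed onto the 4 precise runtime units, with user time taking the
integer-division floor and system time the remainder.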