diff options
author | Stanislaw Gruszka <sgruszka@redhat.com> | 2013-04-30 05:35:06 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2013-04-30 13:13:05 -0400 |
commit | 68aa8efcd1ab961e4684ef5af32f72a6ec1911de (patch) | |
tree | ccf6ffe680a1e85f9319c70dcf45f1bba81243e9 /kernel/sched | |
parent | 772c808a252594692972773f6ee41c289b8e0b2a (diff) |
sched: Avoid prev->stime underflow
Dave Hansen reported strange utime/stime values on his system:
https://lkml.org/lkml/2013/4/4/435
This happens because the prev->stime value is bigger than the rtime
value. The root of the problem is non-monotonic rtime values (i.e.
the current rtime is smaller than the previous rtime), and that should
be debugged and fixed.
But since the problem did not manifest itself before commit
62188451f0d63add7ad0cd2a1ae269d600c1663d ("cputime: Avoid
multiplication overflow on utime scaling"), it should be treated
as a regression, which we can easily fix in the cputime_adjust()
function.
For now, let's apply this fix, but further work is needed to fix the
root of the problem.
Reported-and-tested-by: Dave Hansen <dave@sr71.net>
Cc: <stable@vger.kernel.org> # 3.9+
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: rostedt@goodmis.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Dave Hansen <dave@sr71.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1367314507-9728-3-git-send-email-sgruszka@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/cputime.c | 14 |
1 files changed, 7 insertions, 7 deletions
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 1b7c2161f5cd..337a36745800 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c | |||
@@ -558,7 +558,7 @@ static void cputime_adjust(struct task_cputime *curr, | |||
558 | struct cputime *prev, | 558 | struct cputime *prev, |
559 | cputime_t *ut, cputime_t *st) | 559 | cputime_t *ut, cputime_t *st) |
560 | { | 560 | { |
561 | cputime_t rtime, stime, total; | 561 | cputime_t rtime, stime, utime, total; |
562 | 562 | ||
563 | if (vtime_accounting_enabled()) { | 563 | if (vtime_accounting_enabled()) { |
564 | *ut = curr->utime; | 564 | *ut = curr->utime; |
@@ -589,13 +589,13 @@ static void cputime_adjust(struct task_cputime *curr, | |||
589 | if (prev->stime + prev->utime >= rtime) | 589 | if (prev->stime + prev->utime >= rtime) |
590 | goto out; | 590 | goto out; |
591 | 591 | ||
592 | if (!rtime) { | 592 | if (total) { |
593 | stime = 0; | ||
594 | } else if (!total) { | ||
595 | stime = rtime; | ||
596 | } else { | ||
597 | stime = scale_stime((__force u64)stime, | 593 | stime = scale_stime((__force u64)stime, |
598 | (__force u64)rtime, (__force u64)total); | 594 | (__force u64)rtime, (__force u64)total); |
595 | utime = rtime - stime; | ||
596 | } else { | ||
597 | stime = rtime; | ||
598 | utime = 0; | ||
599 | } | 599 | } |
600 | 600 | ||
601 | /* | 601 | /* |
@@ -604,7 +604,7 @@ static void cputime_adjust(struct task_cputime *curr, | |||
604 | * Let's enforce monotonicity. | 604 | * Let's enforce monotonicity. |
605 | */ | 605 | */ |
606 | prev->stime = max(prev->stime, stime); | 606 | prev->stime = max(prev->stime, stime); |
607 | prev->utime = max(prev->utime, rtime - prev->stime); | 607 | prev->utime = max(prev->utime, utime); |
608 | 608 | ||
609 | out: | 609 | out: |
610 | *ut = prev->utime; | 610 | *ut = prev->utime; |