diff options
author | Stanislaw Gruszka <sgruszka@redhat.com> | 2013-09-04 09:16:03 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2013-09-04 10:31:25 -0400 |
commit | 5a8e01f8fa51f5cbce8f37acc050eb2319d12956 (patch) | |
tree | 01173781a3b8ea5583cc7d256e8c476c48aab1a9 /kernel/sched | |
parent | c2e7fcf53c3cb02b4ada1c66a9bc8a4d97d58aba (diff) |
sched/cputime: Do not scale when utime == 0
scale_stime() silently assumes that stime < rtime; otherwise,
when stime == rtime and both values are big enough (operations
on them do not fit in 32 bits), the resulting scaled stime can
be bigger than rtime. As a consequence, utime = rtime - stime
results in a negative value.
User-space-visible symptoms of the bug are overflowed TIME
values in ps/top output, for example:
$ ps aux | grep rcu
root 8 0.0 0.0 0 0 ? S 12:42 0:00 [rcuc/0]
root 9 0.0 0.0 0 0 ? S 12:42 0:00 [rcub/0]
root 10 62422329 0.0 0 0 ? R 12:42 21114581:37 [rcu_preempt]
root 11 0.1 0.0 0 0 ? S 12:42 0:02 [rcuop/0]
root 12 62422329 0.0 0 0 ? S 12:42 21114581:35 [rcuop/1]
root 10 62422329 0.0 0 0 ? R 12:42 21114581:37 [rcu_preempt]
Overflowed utime values can also be read directly from /proc/$PID/stat.
Reference:
https://lkml.org/lkml/2013/8/20/259
Reported-and-tested-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: stable@vger.kernel.org
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/20130904131602.GC2564@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/cputime.c | 19 |
1 file changed, 11 insertions, 8 deletions
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index c1d7493825ae..5b03f5bebabc 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c | |||
@@ -551,10 +551,7 @@ static void cputime_adjust(struct task_cputime *curr, | |||
551 | struct cputime *prev, | 551 | struct cputime *prev, |
552 | cputime_t *ut, cputime_t *st) | 552 | cputime_t *ut, cputime_t *st) |
553 | { | 553 | { |
554 | cputime_t rtime, stime, utime, total; | 554 | cputime_t rtime, stime, utime; |
555 | |||
556 | stime = curr->stime; | ||
557 | total = stime + curr->utime; | ||
558 | 555 | ||
559 | /* | 556 | /* |
560 | * Tick based cputime accounting depend on random scheduling | 557 | * Tick based cputime accounting depend on random scheduling |
@@ -576,13 +573,19 @@ static void cputime_adjust(struct task_cputime *curr, | |||
576 | if (prev->stime + prev->utime >= rtime) | 573 | if (prev->stime + prev->utime >= rtime) |
577 | goto out; | 574 | goto out; |
578 | 575 | ||
579 | if (total) { | 576 | stime = curr->stime; |
577 | utime = curr->utime; | ||
578 | |||
579 | if (utime == 0) { | ||
580 | stime = rtime; | ||
581 | } else if (stime == 0) { | ||
582 | utime = rtime; | ||
583 | } else { | ||
584 | cputime_t total = stime + utime; | ||
585 | |||
580 | stime = scale_stime((__force u64)stime, | 586 | stime = scale_stime((__force u64)stime, |
581 | (__force u64)rtime, (__force u64)total); | 587 | (__force u64)rtime, (__force u64)total); |
582 | utime = rtime - stime; | 588 | utime = rtime - stime; |
583 | } else { | ||
584 | stime = rtime; | ||
585 | utime = 0; | ||
586 | } | 589 | } |
587 | 590 | ||
588 | /* | 591 | /* |