Diffstat (limited to 'kernel/sched/cputime.c')
-rw-r--r--  kernel/sched/cputime.c | 46
1 file changed, 34 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 024fe1998ad5..699d59756ece 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -521,18 +521,36 @@ void account_idle_ticks(unsigned long ticks)
         account_idle_time(jiffies_to_cputime(ticks));
 }
 
-static cputime_t scale_stime(cputime_t stime, cputime_t rtime, cputime_t total)
+/*
+ * Perform (stime * rtime) / total with reduced chances
+ * of multiplication overflows by using smaller factors
+ * like quotient and remainders of divisions between
+ * rtime and total.
+ */
+static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
 {
-        u64 temp = (__force u64) rtime;
-
-        temp *= (__force u64) stime;
+        u64 rem, res, scaled;
 
-        if (sizeof(cputime_t) == 4)
-                temp = div_u64(temp, (__force u32) total);
-        else
-                temp = div64_u64(temp, (__force u64) total);
+        if (rtime >= total) {
+                /*
+                 * Scale up to rtime / total then add
+                 * the remainder scaled to stime / total.
+                 */
+                res = div64_u64_rem(rtime, total, &rem);
+                scaled = stime * res;
+                scaled += div64_u64(stime * rem, total);
+        } else {
+                /*
+                 * Same in reverse: scale down to total / rtime
+                 * then subtract that result scaled to
+                 * the remaining part.
+                 */
+                res = div64_u64_rem(total, rtime, &rem);
+                scaled = div64_u64(stime, res);
+                scaled -= div64_u64(scaled * rem, total);
+        }
 
-        return (__force cputime_t) temp;
+        return (__force cputime_t) scaled;
 }
 
 /*
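
Editorial note (not part of the patch): the quotient/remainder factoring above can be checked outside the kernel. The sketch below mirrors it in userspace, with plain C division standing in for div64_u64_rem() and div64_u64(); scale_stime_demo() and the sample values are illustrative only.

        /* Illustrative only: plain C division in place of the kernel div64 helpers. */
        #include <stdio.h>
        #include <stdint.h>

        static uint64_t scale_stime_demo(uint64_t stime, uint64_t rtime, uint64_t total)
        {
                uint64_t q, r, scaled;

                if (rtime >= total) {
                        /* scale up: stime * (rtime / total), plus the scaled remainder */
                        q = rtime / total;
                        r = rtime % total;
                        scaled = stime * q + (stime * r) / total;
                } else {
                        /* scale down: stime / (total / rtime), minus the scaled remainder */
                        q = total / rtime;
                        r = total % rtime;
                        scaled = stime / q;
                        scaled -= (scaled * r) / total;
                }
                return scaled;
        }

        int main(void)
        {
                /* 3 of total=10 tick samples, rtime=1000: expect 3 * 1000 / 10 = 300 */
                printf("%llu\n", (unsigned long long)scale_stime_demo(3, 1000, 10));
                return 0;
        }

As the new comment says, the point is not to make overflow impossible but to multiply stime by smaller factors (the quotient and the remainder of rtime/total, or vice versa) instead of forming the full stime * rtime product first.
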
@@ -566,10 +584,14 @@ static void cputime_adjust(struct task_cputime *curr,
          */
         rtime = nsecs_to_cputime(curr->sum_exec_runtime);
 
-        if (total)
-                stime = scale_stime(stime, rtime, total);
-        else
+        if (!rtime) {
+                stime = 0;
+        } else if (!total) {
                 stime = rtime;
+        } else {
+                stime = scale_stime((__force u64)stime,
+                                    (__force u64)rtime, (__force u64)total);
+        }
 
         /*
          * If the tick based count grows faster than the scheduler one,
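
Editorial note (not part of the patch): the second hunk is largely about ordering. rtime is tested before total because the reworked scale_stime() divides by rtime in its rtime < total branch, so a zero rtime has to be filtered out before scaling; a zero total still falls back to attributing all of rtime to stime, as before. A minimal userspace sketch of that decision chain, where split_stime() is a hypothetical stand-in that uses the naive stime * rtime / total in place of the overflow-aware helper above:

        #include <assert.h>
        #include <stdint.h>

        /* Hypothetical stand-in for the cputime_adjust() split; not the kernel API. */
        static uint64_t split_stime(uint64_t stime, uint64_t rtime, uint64_t total)
        {
                if (!rtime)
                        return 0;        /* task has no runtime: report no stime    */
                if (!total)
                        return rtime;    /* no tick samples: attribute all of rtime */
                return stime * rtime / total;    /* kernel: scale_stime()           */
        }

        int main(void)
        {
                assert(split_stime(5, 0, 10) == 0);      /* !rtime checked first       */
                assert(split_stime(0, 100, 0) == 100);   /* !total falls back to rtime */
                assert(split_stime(3, 100, 10) == 30);   /* ordinary scaled split      */
                return 0;
        }
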