-rw-r--r--  include/linux/math64.h  | 19
-rw-r--r--  kernel/sched/cputime.c  | 80
-rw-r--r--  lib/div64.c             | 19
3 files changed, 58 insertions, 60 deletions
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 931a619407bf..b8ba85544721 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -30,15 +30,6 @@ static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 }
 
 /**
- * div64_u64_rem - unsigned 64bit divide with 64bit divisor
- */
-static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
-{
-	*remainder = dividend % divisor;
-	return dividend / divisor;
-}
-
-/**
  * div64_u64 - unsigned 64bit divide with 64bit divisor
  */
 static inline u64 div64_u64(u64 dividend, u64 divisor)
@@ -70,16 +61,8 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
 #endif
 
-#ifndef div64_u64_rem
-extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
-#endif
-
 #ifndef div64_u64
-static inline u64 div64_u64(u64 dividend, u64 divisor)
-{
-	u64 remainder;
-	return div64_u64_rem(dividend, divisor, &remainder);
-}
+extern u64 div64_u64(u64 dividend, u64 divisor);
 #endif
 
 #ifndef div64_s64
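With div64_u64_rem() gone from the header, code that still wants both the quotient and the remainder can derive the remainder from div64_u64(). A minimal user-space sketch of that pattern, assuming a 64-bit build where div64_u64() reduces to plain C division; div64_u64_with_rem() is a hypothetical name used only for illustration:

#include <stdint.h>

/* Stand-in for the 64-bit case, where div64_u64() is plain division. */
static inline uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

/* Hypothetical helper: recover the remainder from the quotient. */
static inline uint64_t div64_u64_with_rem(uint64_t dividend, uint64_t divisor,
					  uint64_t *remainder)
{
	uint64_t quot = div64_u64(dividend, divisor);

	*remainder = dividend - quot * divisor;
	return quot;
}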
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index ea32f02bf2c3..cc2dc3eea8a3 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -506,34 +506,47 @@ void account_idle_ticks(unsigned long ticks)
 }
 
 /*
- * Perform (stime * rtime) / total with reduced chances
- * of multiplication overflows by using smaller factors
- * like quotient and remainders of divisions between
- * rtime and total.
+ * Perform (stime * rtime) / total, but avoid multiplication overflow by
+ * loosing precision when the numbers are big.
  */
 static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
 {
-	u64 rem, res, scaled;
+	u64 scaled;
 
-	if (rtime >= total) {
-		/*
-		 * Scale up to rtime / total then add
-		 * the remainder scaled to stime / total.
-		 */
-		res = div64_u64_rem(rtime, total, &rem);
-		scaled = stime * res;
-		scaled += div64_u64(stime * rem, total);
-	} else {
-		/*
-		 * Same in reverse: scale down to total / rtime
-		 * then substract that result scaled to
-		 * to the remaining part.
-		 */
-		res = div64_u64_rem(total, rtime, &rem);
-		scaled = div64_u64(stime, res);
-		scaled -= div64_u64(scaled * rem, total);
+	for (;;) {
+		/* Make sure "rtime" is the bigger of stime/rtime */
+		if (stime > rtime) {
+			u64 tmp = rtime; rtime = stime; stime = tmp;
+		}
+
+		/* Make sure 'total' fits in 32 bits */
+		if (total >> 32)
+			goto drop_precision;
+
+		/* Does rtime (and thus stime) fit in 32 bits? */
+		if (!(rtime >> 32))
+			break;
+
+		/* Can we just balance rtime/stime rather than dropping bits? */
+		if (stime >> 31)
+			goto drop_precision;
+
+		/* We can grow stime and shrink rtime and try to make them both fit */
+		stime <<= 1;
+		rtime >>= 1;
+		continue;
+
+drop_precision:
+		/* We drop from rtime, it has more bits than stime */
+		rtime >>= 1;
+		total >>= 1;
 	}
 
+	/*
+	 * Make sure gcc understands that this is a 32x32->64 multiply,
+	 * followed by a 64/32->64 divide.
+	 */
+	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
 	return (__force cputime_t) scaled;
 }
 
@@ -545,7 +558,7 @@ static void cputime_adjust(struct task_cputime *curr,
 			   struct cputime *prev,
 			   cputime_t *ut, cputime_t *st)
 {
-	cputime_t rtime, stime, total;
+	cputime_t rtime, stime, utime, total;
 
 	if (vtime_accounting_enabled()) {
 		*ut = curr->utime;
@@ -568,13 +581,21 @@ static void cputime_adjust(struct task_cputime *curr,
 	 */
 	rtime = nsecs_to_cputime(curr->sum_exec_runtime);
 
-	if (!rtime) {
-		stime = 0;
-	} else if (!total) {
-		stime = rtime;
-	} else {
+	/*
+	 * Update userspace visible utime/stime values only if actual execution
+	 * time is bigger than already exported. Note that can happen, that we
+	 * provided bigger values due to scaling inaccuracy on big numbers.
+	 */
+	if (prev->stime + prev->utime >= rtime)
+		goto out;
+
+	if (total) {
 		stime = scale_stime((__force u64)stime,
 				    (__force u64)rtime, (__force u64)total);
+		utime = rtime - stime;
+	} else {
+		stime = rtime;
+		utime = 0;
 	}
 
 	/*
@@ -583,8 +604,9 @@ static void cputime_adjust(struct task_cputime *curr,
 	 * Let's enforce monotonicity.
 	 */
 	prev->stime = max(prev->stime, stime);
-	prev->utime = max(prev->utime, rtime - prev->stime);
+	prev->utime = max(prev->utime, utime);
 
+out:
 	*ut = prev->utime;
 	*st = prev->stime;
 }
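The scale_stime() loop above can be exercised outside the kernel to see how it trades precision for overflow safety. The sketch below is a user-space approximation, assuming plain 64-bit division in place of div_u64() and dropping the cputime_t cast; scale_stime_sketch() and the sample values in main() are illustrative, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* User-space model of the patched scale_stime(): halve rtime/total (or
 * rebalance stime/rtime) until a 32x32->64 multiply and a 64/32 divide
 * can no longer overflow. */
static uint64_t scale_stime_sketch(uint64_t stime, uint64_t rtime, uint64_t total)
{
	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime) {
			uint64_t tmp = rtime; rtime = stime; stime = tmp;
		}

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* Grow stime and shrink rtime to try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* Drop bits from rtime and total; rtime has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/* 32x32->64 multiply followed by a 64/32 divide, as in the patch */
	return (uint64_t)(uint32_t)stime * (uint32_t)rtime / (uint32_t)total;
}

int main(void)
{
	/* Large enough that a naive stime * rtime would overflow 64 bits */
	uint64_t stime = 3ULL << 32, rtime = 5ULL << 32, total = 7ULL << 32;

	printf("scaled ~= %llu\n",
	       (unsigned long long)scale_stime_sketch(stime, rtime, total));
	return 0;
}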
diff --git a/lib/div64.c b/lib/div64.c
index 3af5728d95fd..a163b6caef73 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -79,10 +79,9 @@ EXPORT_SYMBOL(div_s64_rem);
 #endif
 
 /**
- * div64_u64_rem - unsigned 64bit divide with 64bit divisor and 64bit remainder
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
  * @dividend: 64bit dividend
  * @divisor: 64bit divisor
- * @remainder: 64bit remainder
  *
  * This implementation is a modified version of the algorithm proposed
  * by the book 'Hacker's Delight'. The original source and full proof
@@ -90,33 +89,27 @@ EXPORT_SYMBOL(div_s64_rem);
  *
  * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
  */
-#ifndef div64_u64_rem
-u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
+#ifndef div64_u64
+u64 div64_u64(u64 dividend, u64 divisor)
 {
 	u32 high = divisor >> 32;
 	u64 quot;
 
 	if (high == 0) {
-		u32 rem32;
-		quot = div_u64_rem(dividend, divisor, &rem32);
-		*remainder = rem32;
+		quot = div_u64(dividend, divisor);
 	} else {
 		int n = 1 + fls(high);
 		quot = div_u64(dividend >> n, divisor >> n);
 
 		if (quot != 0)
 			quot--;
-
-		*remainder = dividend - quot * divisor;
-		if (*remainder >= divisor) {
+		if ((dividend - quot * divisor) >= divisor)
 			quot++;
-			*remainder -= divisor;
-		}
 	}
 
 	return quot;
 }
-EXPORT_SYMBOL(div64_u64_rem);
+EXPORT_SYMBOL(div64_u64);
 #endif
 
 /**
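In the 64-bit-divisor path of div64_u64() above, the estimate (after the conditional decrement) is at most one below the true quotient, per the Hacker's Delight proof referenced in the comment, which is why a single conditional increment now replaces the remainder bookkeeping. A user-space sketch mirroring the new code, with plain C stand-ins for div_u64() and fls(); div64_u64_sketch() and fls_sketch() are illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's fls(): 1-based position of the highest set bit. */
static int fls_sketch(uint32_t x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

/* Mirrors the patched div64_u64(): estimate with shifted operands, then
 * correct the at-most-one-short quotient with one conditional increment. */
static uint64_t div64_u64_sketch(uint64_t dividend, uint64_t divisor)
{
	uint32_t high = divisor >> 32;
	uint64_t quot;

	if (high == 0) {
		quot = dividend / divisor;	/* div_u64() in the kernel */
	} else {
		int n = 1 + fls_sketch(high);

		quot = (dividend >> n) / (divisor >> n);
		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}
	return quot;
}

int main(void)
{
	uint64_t dividend = 0xFFFFFFFFFFFFFFFFULL;
	uint64_t divisor  = 0x100000001ULL;	/* divisor with bits above 32 */

	printf("%llu (expect %llu)\n",
	       (unsigned long long)div64_u64_sketch(dividend, divisor),
	       (unsigned long long)(dividend / divisor));
	return 0;
}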