author    Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 10:43:28 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 10:43:28 -0400
commit    16fa94b532b1958f508e07eca1a9256351241fbc (patch)
tree      90012a7b7fe2b8cf96f6f5ec12490e0c5e152291 /lib
parent    e0972916e8fe943f342b0dd1c9d43dbf5bc261c2 (diff)
parent    25f55d9d01ad7a7ad248fd5af1d22675ffd202c5 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
 "The main changes in this development cycle were:

   - full dynticks preparatory work by Frederic Weisbecker

   - factor out the cpu time accounting code better, by Li Zefan

   - multi-CPU load balancer cleanups and improvements by Joonsoo Kim

   - various smaller fixes and cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (45 commits)
  sched: Fix init NOHZ_IDLE flag
  sched: Prevent to re-select dst-cpu in load_balance()
  sched: Rename load_balance_tmpmask to load_balance_mask
  sched: Move up affinity check to mitigate useless redoing overhead
  sched: Don't consider other cpus in our group in case of NEWLY_IDLE
  sched: Explicitly cpu_idle_type checking in rebalance_domains()
  sched: Change position of resched_cpu() in load_balance()
  sched: Fix wrong rq's runnable_avg update with rt tasks
  sched: Document task_struct::personality field
  sched/cpuacct/UML: Fix header file dependency bug on the UML build
  cgroup: Kill subsys.active flag
  sched/cpuacct: No need to check subsys active state
  sched/cpuacct: Initialize cpuacct subsystem earlier
  sched/cpuacct: Initialize root cpuacct earlier
  sched/cpuacct: Allocate per_cpu cpuusage for root cpuacct statically
  sched/cpuacct: Clean up cpuacct.h
  sched/cpuacct: Remove redundant NULL checks in cpuacct_acount_field()
  sched/cpuacct: Remove redundant NULL checks in cpuacct_charge()
  sched/cpuacct: Add cpuacct_acount_field()
  sched/cpuacct: Add cpuacct_init()
  ...
Diffstat (limited to 'lib')
-rw-r--r--  lib/div64.c  19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/lib/div64.c b/lib/div64.c
index a163b6caef73..3af5728d95fd 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -79,9 +79,10 @@ EXPORT_SYMBOL(div_s64_rem);
 #endif
 
 /**
- * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * div64_u64_rem - unsigned 64bit divide with 64bit divisor and 64bit remainder
  * @dividend: 64bit dividend
  * @divisor: 64bit divisor
+ * @remainder: 64bit remainder
  *
  * This implementation is a modified version of the algorithm proposed
  * by the book 'Hacker's Delight'. The original source and full proof
@@ -89,27 +90,33 @@ EXPORT_SYMBOL(div_s64_rem);
  *
  * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
  */
-#ifndef div64_u64
-u64 div64_u64(u64 dividend, u64 divisor)
+#ifndef div64_u64_rem
+u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
 {
 	u32 high = divisor >> 32;
 	u64 quot;
 
 	if (high == 0) {
-		quot = div_u64(dividend, divisor);
+		u32 rem32;
+		quot = div_u64_rem(dividend, divisor, &rem32);
+		*remainder = rem32;
 	} else {
 		int n = 1 + fls(high);
 		quot = div_u64(dividend >> n, divisor >> n);
 
 		if (quot != 0)
 			quot--;
-		if ((dividend - quot * divisor) >= divisor)
+
+		*remainder = dividend - quot * divisor;
+		if (*remainder >= divisor) {
 			quot++;
+			*remainder -= divisor;
+		}
 	}
 
 	return quot;
 }
-EXPORT_SYMBOL(div64_u64);
+EXPORT_SYMBOL(div64_u64_rem);
 #endif
 
 /**
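For readers outside the kernel tree, the following standalone userspace sketch shows the technique this patch extends: when the divisor does not fit in 32 bits, shift both operands right by n = 1 + fls(high) so it does, divide, and repair the estimate, which per the Hacker's Delight derivation cited above is either exact or one too large. The names my_div64_u64_rem and fls32 are illustrative stand-ins, not kernel APIs; the in-kernel version routes the 32-bit-divisor case through div_u64_rem() rather than the native '/' used here.

#include <stdint.h>
#include <stdio.h>

/* 1-based position of the highest set bit; stand-in for the kernel's fls() */
static int fls32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Illustrative userspace analogue of div64_u64_rem() (hypothetical name) */
static uint64_t my_div64_u64_rem(uint64_t dividend, uint64_t divisor,
				 uint64_t *remainder)
{
	uint32_t high = divisor >> 32;
	uint64_t quot;

	if (high == 0) {
		/* divisor fits in 32 bits: divide directly */
		quot = dividend / divisor;
		*remainder = dividend - quot * divisor;
	} else {
		/*
		 * Shift both operands so the divisor fits in 32 bits.
		 * The resulting estimate is either the true quotient or
		 * one too large, so decrement it, then correct upward
		 * once if the remainder shows the decrement was unneeded.
		 */
		int n = 1 + fls32(high);
		quot = (dividend >> n) / (divisor >> n);

		if (quot != 0)
			quot--;

		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {
			quot++;
			*remainder -= divisor;
		}
	}

	return quot;
}

int main(void)
{
	uint64_t rem;
	uint64_t quot = my_div64_u64_rem(0x123456789abcdef0ULL,
					 0x100000001ULL, &rem);
	printf("quot=%llu rem=%llu\n",
	       (unsigned long long)quot, (unsigned long long)rem);
	return 0;
}

The point of the shift is that once the divisor fits in 32 bits, 32-bit platforms can use a cheap hardware division instead of a full 64/64 software routine; the off-by-at-most-one error the shift introduces is repaired by the single conditional increment.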