diff options
Diffstat (limited to 'include/linux/math64.h')
-rw-r--r-- | include/linux/math64.h | 80 |
1 file changed, 80 insertions, 0 deletions
diff --git a/include/linux/math64.h b/include/linux/math64.h index c45c089bfdac..6e8b5b270ffe 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h | |||
@@ -142,6 +142,13 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) | |||
142 | } | 142 | } |
143 | #endif /* mul_u64_u32_shr */ | 143 | #endif /* mul_u64_u32_shr */ |
144 | 144 | ||
145 | #ifndef mul_u64_u64_shr | ||
146 | static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift) | ||
147 | { | ||
148 | return (u64)(((unsigned __int128)a * mul) >> shift); | ||
149 | } | ||
150 | #endif /* mul_u64_u64_shr */ | ||
151 | |||
145 | #else | 152 | #else |
146 | 153 | ||
147 | #ifndef mul_u64_u32_shr | 154 | #ifndef mul_u64_u32_shr |
@@ -161,6 +168,79 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) | |||
161 | } | 168 | } |
162 | #endif /* mul_u64_u32_shr */ | 169 | #endif /* mul_u64_u32_shr */ |
163 | 170 | ||
#ifndef mul_u64_u64_shr
/*
 * Compute (a * b) >> shift where the product is a full 128-bit value,
 * for configurations without a native 128-bit integer type.  Each
 * 64-bit operand is split into 32-bit halves via a union and the four
 * partial products are combined schoolbook-style.
 */
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	/* Four 32x32->64 partial products of the operand halves. */
	rl.ll = (u64)a0.l.low * b0.l.low;
	rm.ll = (u64)a0.l.low * b0.l.high;
	rn.ll = (u64)a0.l.high * b0.l.low;
	rh.ll = (u64)a0.l.high * b0.l.high;

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95. The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 * (The store into the 32-bit union member keeps only the low half
	 * of "c"; the full value remains in "c" for the carry.)
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	/* shift >= 64: only the high 64-bit half can contribute. */
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
214 | |||
164 | #endif | 215 | #endif |
165 | 216 | ||
#ifndef mul_u64_u32_div
/*
 * Compute (a * mul) / divisor with a 96-bit intermediate product,
 * using only 32x32->64 multiplies plus do_div() for the divisions.
 */
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	/* Partial products; the carry out of the low half feeds rh. */
	rl.ll = (u64)u.l.low * mul;
	rh.ll = (u64)u.l.high * mul + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	/*
	 * NOTE(review): do_div() divides its first argument in place and
	 * evaluates to the remainder (asm/div64.h) -- the remainder of the
	 * high-half division becomes the upper word of the low-half dividend.
	 */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	/* Recombine the two 32-bit quotient halves into the 64-bit result. */
	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
245 | |||
166 | #endif /* _LINUX_MATH64_H */ | 246 | #endif /* _LINUX_MATH64_H */ |