author    Russell King <rmk@dyn-67.arm.linux.org.uk>    2005-06-29 18:01:02 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>    2005-06-29 18:01:02 -0400
commit    438a76167959061e371025f727fabec2ad9e70a7
tree      47991373507725b1307ab084a7d7bda5dd9ee1be
parent    b3402cf50efead37dd9553b90fbf1486e09fb78e
[PATCH] ARM: Fix VFP to use do_div()
VFP needlessly used __divdi3 for 64-bit division. Convert it to use our 64-bit by 32-bit division (do_div) instead.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r--  arch/arm/vfp/vfp.h        15
-rw-r--r--  arch/arm/vfp/vfpdouble.c   2
-rw-r--r--  arch/arm/vfp/vfpsingle.c  14
3 files changed, 27 insertions, 4 deletions
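The pattern used throughout the diff below: do_div() from <asm/div64.h> divides a 64-bit dividend by a 32-bit divisor in place, replacing its first argument with the quotient and returning the 32-bit remainder. Because it modifies its argument, each converted call site first copies the dividend into a local u64. A minimal sketch of that usage, assuming a kernel build environment; the function and variable names here are illustrative and not part of the patch:

#include <asm/div64.h>

static u64 div64_by_32_example(u64 dividend, u32 divisor)
{
	u64 q = dividend;		/* do_div() overwrites its first argument with the quotient */
	u32 rem;

	rem = do_div(q, divisor);	/* returns the 32-bit remainder */

	/* q == dividend / divisor, rem == dividend % divisor */
	(void)rem;
	return q;
}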
diff --git a/arch/arm/vfp/vfp.h b/arch/arm/vfp/vfp.h
index 55a02bc994a3..4b97950984e9 100644
--- a/arch/arm/vfp/vfp.h
+++ b/arch/arm/vfp/vfp.h
@@ -117,7 +117,13 @@ static inline u64 vfp_estimate_div128to64(u64 nh, u64 nl, u64 m)
 	if (nh >= m)
 		return ~0ULL;
 	mh = m >> 32;
-	z = (mh << 32 <= nh) ? 0xffffffff00000000ULL : (nh / mh) << 32;
+	if (mh << 32 <= nh) {
+		z = 0xffffffff00000000ULL;
+	} else {
+		z = nh;
+		do_div(z, mh);
+		z <<= 32;
+	}
 	mul64to128(&termh, &terml, m, z);
 	sub128(&remh, &reml, nh, nl, termh, terml);
 	ml = m << 32;
@@ -126,7 +132,12 @@ static inline u64 vfp_estimate_div128to64(u64 nh, u64 nl, u64 m)
 		add128(&remh, &reml, remh, reml, mh, ml);
 	}
 	remh = (remh << 32) | (reml >> 32);
-	z |= (mh << 32 <= remh) ? 0xffffffff : remh / mh;
+	if (mh << 32 <= remh) {
+		z |= 0xffffffff;
+	} else {
+		do_div(remh, mh);
+		z |= remh;
+	}
 	return z;
 }
 
diff --git a/arch/arm/vfp/vfpdouble.c b/arch/arm/vfp/vfpdouble.c
index fa3053e84db5..b801cd66b6ea 100644
--- a/arch/arm/vfp/vfpdouble.c
+++ b/arch/arm/vfp/vfpdouble.c
@@ -32,6 +32,8 @@
  */
 #include <linux/kernel.h>
 #include <linux/bitops.h>
+
+#include <asm/div64.h>
 #include <asm/ptrace.h>
 #include <asm/vfp.h>
 
diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c
index 6849fe35cb2e..14dd696ddeb1 100644
--- a/arch/arm/vfp/vfpsingle.c
+++ b/arch/arm/vfp/vfpsingle.c
@@ -32,6 +32,8 @@
  */
 #include <linux/kernel.h>
 #include <linux/bitops.h>
+
+#include <asm/div64.h>
 #include <asm/ptrace.h>
 #include <asm/vfp.h>
 
@@ -303,7 +305,11 @@ u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand)
 		if (z <= a)
 			return (s32)a >> 1;
 	}
-	return (u32)(((u64)a << 31) / z) + (z >> 1);
+	{
+		u64 v = (u64)a << 31;
+		do_div(v, z);
+		return v + (z >> 1);
+	}
 }
 
 static u32 vfp_single_fsqrt(int sd, int unused, s32 m, u32 fpscr)
@@ -1107,7 +1113,11 @@ static u32 vfp_single_fdiv(int sd, int sn, s32 m, u32 fpscr)
 		vsn.significand >>= 1;
 		vsd.exponent++;
 	}
-	vsd.significand = ((u64)vsn.significand << 32) / vsm.significand;
+	{
+		u64 significand = (u64)vsn.significand << 32;
+		do_div(significand, vsm.significand);
+		vsd.significand = significand;
+	}
 	if ((vsd.significand & 0x3f) == 0)
 		vsd.significand |= ((u64)vsm.significand * vsd.significand != (u64)vsn.significand << 32);
 