path: root/arch/s390/math-emu/sfp-util.h
author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-09-28 10:56:43 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2006-09-28 10:56:43 -0400
commit    94c12cc7d196bab34aaa98d38521549fa1e5ef76 (patch)
tree      8e0cec0ed44445d74a2cb5160303d6b4dfb1bc31 /arch/s390/math-emu/sfp-util.h
parent    25d83cbfaa44e1b9170c0941c3ef52ca39f54ccc (diff)
[S390] Inline assembly cleanup.
Major cleanup of all s390 inline assemblies. They now have a common
coding style. Quite a few have been shortened, mainly by using register
asm variables. Use of the EX_TABLE macro helps as well. The atomic ops,
bit ops and locking inlines now use the Q-constraint if a newer gcc is
used. That results in slightly better code.

Thanks to Christian Borntraeger for proofreading the changes.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
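As a concrete illustration of the "register asm variables" technique the
message credits for shortening many inlines, here is a minimal sketch in
the commit's coding style; the helper name alr_sketch and the choice of
GPRs 2 and 3 are invented for this note and do not appear in the commit:

static inline unsigned int alr_sketch(unsigned int a, unsigned int b)
{
        /*
         * Bind the C variables to fixed GPRs; gcc then passes the
         * values straight into the template, so the asm body needs
         * no explicit load instructions.  For "alr" any register
         * would do; the technique pays off for instructions tied to
         * fixed registers or even/odd register pairs, such as the
         * "mr" in the umul_ppmm hunk below.
         */
        register unsigned int reg2 asm("2") = a;
        register unsigned int reg3 asm("3") = b;

        asm volatile(
                "       alr     %0,%1"  /* 32-bit add logical */
                : "+d" (reg2)
                : "d" (reg3)
                : "cc");
        return reg2;
}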
Diffstat (limited to 'arch/s390/math-emu/sfp-util.h')
-rw-r--r--  arch/s390/math-emu/sfp-util.h  73
1 file changed, 38 insertions(+), 35 deletions(-)
diff --git a/arch/s390/math-emu/sfp-util.h b/arch/s390/math-emu/sfp-util.h
index ab556b600f73..5b6ca4570ea4 100644
--- a/arch/s390/math-emu/sfp-util.h
+++ b/arch/s390/math-emu/sfp-util.h
@@ -4,48 +4,51 @@
 #include <asm/byteorder.h>
 
 #define add_ssaaaa(sh, sl, ah, al, bh, bl) ({          \
-        unsigned int __sh = (ah);                      \
-        unsigned int __sl = (al);                      \
-        __asm__ ("   alr   %1,%3\n"                    \
-                 "   brc   12,0f\n"                    \
-                 "   ahi   %0,1\n"                     \
-                 "0: alr   %0,%2"                      \
-                 : "+&d" (__sh), "+d" (__sl)           \
-                 : "d" (bh), "d" (bl) : "cc" );        \
-        (sh) = __sh;                                   \
-        (sl) = __sl;                                   \
+        unsigned int __sh = (ah);                      \
+        unsigned int __sl = (al);                      \
+        asm volatile(                                  \
+                "       alr     %1,%3\n"               \
+                "       brc     12,0f\n"               \
+                "       ahi     %0,1\n"                \
+                "0:     alr     %0,%2"                 \
+                : "+&d" (__sh), "+d" (__sl)            \
+                : "d" (bh), "d" (bl) : "cc");          \
+        (sh) = __sh;                                   \
+        (sl) = __sl;                                   \
 })
 
 #define sub_ddmmss(sh, sl, ah, al, bh, bl) ({          \
-        unsigned int __sh = (ah);                      \
-        unsigned int __sl = (al);                      \
-        __asm__ ("   slr   %1,%3\n"                    \
-                 "   brc   3,0f\n"                     \
-                 "   ahi   %0,-1\n"                    \
-                 "0: slr   %0,%2"                      \
-                 : "+&d" (__sh), "+d" (__sl)           \
-                 : "d" (bh), "d" (bl) : "cc" );        \
-        (sh) = __sh;                                   \
-        (sl) = __sl;                                   \
+        unsigned int __sh = (ah);                      \
+        unsigned int __sl = (al);                      \
+        asm volatile(                                  \
+                "       slr     %1,%3\n"               \
+                "       brc     3,0f\n"                \
+                "       ahi     %0,-1\n"               \
+                "0:     slr     %0,%2"                 \
+                : "+&d" (__sh), "+d" (__sl)            \
+                : "d" (bh), "d" (bl) : "cc");          \
+        (sh) = __sh;                                   \
+        (sl) = __sl;                                   \
 })
 
 /* a umul b = a mul b + (a>=2<<31) ? b<<32:0 + (b>=2<<31) ? a<<32:0 */
 #define umul_ppmm(wh, wl, u, v) ({                     \
-        unsigned int __wh = u;                         \
-        unsigned int __wl = v;                         \
-        __asm__ ("   ltr   1,%0\n"                     \
-                 "   mr    0,%1\n"                     \
-                 "   jnm   0f\n"                       \
-                 "   alr   0,%1\n"                     \
-                 "0: ltr   %1,%1\n"                    \
-                 "   jnm   1f\n"                       \
-                 "   alr   0,%0\n"                     \
-                 "1: lr    %0,0\n"                     \
-                 "   lr    %1,1\n"                     \
-                 : "+d" (__wh), "+d" (__wl)            \
-                 : : "0", "1", "cc" );                 \
-        wh = __wh;                                     \
-        wl = __wl;                                     \
+        unsigned int __wh = u;                         \
+        unsigned int __wl = v;                         \
+        asm volatile(                                  \
+                "       ltr     1,%0\n"                \
+                "       mr      0,%1\n"                \
+                "       jnm     0f\n"                  \
+                "       alr     0,%1\n"                \
+                "0:     ltr     %1,%1\n"               \
+                "       jnm     1f\n"                  \
+                "       alr     0,%0\n"                \
+                "1:     lr      %0,0\n"                \
+                "       lr      %1,1\n"                \
+                : "+d" (__wh), "+d" (__wl)             \
+                : : "0", "1", "cc");                   \
+        wh = __wh;                                     \
+        wl = __wl;                                     \
 })
 
 #define udiv_qrnnd(q, r, n1, n0, d)                    \
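The context comment above umul_ppmm records why the macro needs fixups
at all: "mr" is a signed 32x32->64 multiply, so the unsigned product is
recovered by adding b<<32 when bit 31 of a is set and a<<32 when bit 31
of b is set, which is exactly what the two ltr/jnm/alr sequences around
labels 0 and 1 do. A standalone plain-C sketch of that identity, with
invented names and a small self-check (not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Mirror of the correction umul_ppmm applies after "mr". */
static uint64_t umul_via_signed(uint32_t a, uint32_t b)
{
        /* Signed 32x32->64 multiply, as "mr" computes it. */
        uint64_t prod = (uint64_t)((int64_t)(int32_t)a * (int32_t)b);

        if ((int32_t)a < 0)     /* "ltr 1,%0 / jnm 0f / alr 0,%1" */
                prod += (uint64_t)b << 32;
        if ((int32_t)b < 0)     /* "0: ltr %1,%1 / jnm 1f / alr 0,%0" */
                prod += (uint64_t)a << 32;
        return prod;
}

int main(void)
{
        /* Exercise all four sign-bit combinations of the inputs. */
        uint32_t vals[] = { 3u, 0x7fffffffu, 0x80000000u, 0xffffffffu };
        int i, j;

        for (i = 0; i < 4; i++)
                for (j = 0; j < 4; j++)
                        if (umul_via_signed(vals[i], vals[j]) !=
                            (uint64_t)vals[i] * vals[j])
                                printf("mismatch %08x %08x\n",
                                       vals[i], vals[j]);
        printf("done\n");
        return 0;
}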