author		Chris Zankel <chris@zankel.net>	2007-05-31 20:44:31 -0400
committer	Chris Zankel <chris@zankel.net>	2007-05-31 20:44:31 -0400
commit		a0bb46ba074d2442e96f55c997293767340f4ce9 (patch)
tree		773c688bfe2ed7a803a405a9f592a68837621301 /arch
parent		4af410a868ddddfc6aa9b19379599feac7e79d95 (diff)
[XTENSA] fix sources using deprecated assembler directive
The assembler directive '.begin literal' is deprecated in newer versions of
binutils (strncpy_user.S and strnlen_user.S).

Signed-off-by: Chris Zankel <chris@zankel.net>
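Background on the fix: the Xtensa assembler expands a 'movi' whose constant
does not fit in its 12-bit signed immediate into an 'l32r' that reads from an
automatically placed literal pool, so the hand-maintained '.begin literal' /
'.end literal' block is no longer needed. Because the old pool built each mask
byte-by-byte in memory order while 'movi' takes a whole 32-bit value, the
replacement must also select the mask values by endianness (the __XTENSA_EB__
conditional below). A minimal sketch of the two styles (illustrative, not
lifted verbatim from the files):

	# Deprecated style: hand-placed literal pool; endian-neutral
	# because the mask is assembled byte-by-byte in memory order.
	.begin	literal
	.align	4
.Lmask0:
	.byte	0xff, 0x00, 0x00, 0x00
	.end	literal

	l32r	a5, .Lmask0	# load the mask from the literal pool

	# Current style: the assembler converts this movi into an l32r
	# from an automatically allocated literal pool, since the
	# constant exceeds movi's 12-bit signed immediate range.
	movi	a5, 0xff000000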
Diffstat (limited to 'arch')
-rw-r--r--	arch/xtensa/lib/strncpy_user.S	33
-rw-r--r--	arch/xtensa/lib/strnlen_user.S	33
2 files changed, 34 insertions, 32 deletions
diff --git a/arch/xtensa/lib/strncpy_user.S b/arch/xtensa/lib/strncpy_user.S
index a834057bda6b..b2655d94558d 100644
--- a/arch/xtensa/lib/strncpy_user.S
+++ b/arch/xtensa/lib/strncpy_user.S
@@ -25,18 +25,18 @@
 /*
  * char *__strncpy_user(char *dst, const char *src, size_t len)
  */
-.text
-.begin literal
-.align	4
-.Lmask0:
-	.byte	0xff, 0x00, 0x00, 0x00
-.Lmask1:
-	.byte	0x00, 0xff, 0x00, 0x00
-.Lmask2:
-	.byte	0x00, 0x00, 0xff, 0x00
-.Lmask3:
-	.byte	0x00, 0x00, 0x00, 0xff
-.end literal
+
+#ifdef __XTENSA_EB__
+# define MASK0 0xff000000
+# define MASK1 0x00ff0000
+# define MASK2 0x0000ff00
+# define MASK3 0x000000ff
+#else
+# define MASK0 0x000000ff
+# define MASK1 0x0000ff00
+# define MASK2 0x00ff0000
+# define MASK3 0xff000000
+#endif
 
 # Register use
 # a0/ return address
@@ -53,6 +53,7 @@
 # a11/ dst
 # a12/ tmp
 
+.text
 .align	4
 .global	__strncpy_user
 .type	__strncpy_user,@function
@@ -61,10 +62,10 @@ __strncpy_user:
 	# a2/ dst, a3/ src, a4/ len
 	mov	a11, a2		# leave dst in return value register
 	beqz	a4, .Lret	# if len is zero
-	l32r	a5, .Lmask0	# mask for byte 0
-	l32r	a6, .Lmask1	# mask for byte 1
-	l32r	a7, .Lmask2	# mask for byte 2
-	l32r	a8, .Lmask3	# mask for byte 3
+	movi	a5, MASK0	# mask for byte 0
+	movi	a6, MASK1	# mask for byte 1
+	movi	a7, MASK2	# mask for byte 2
+	movi	a8, MASK3	# mask for byte 3
 	bbsi.l	a3, 0, .Lsrc1mod2	# if only  8-bit aligned
 	bbsi.l	a3, 1, .Lsrc2mod4	# if only 16-bit aligned
 .Lsrcaligned:	# return here when src is word-aligned
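For context, a sketch of how the routines consume these masks later in the
word loop (that code is not part of this hunk, and the label names here are
hypothetical): 'bnone' branches when the AND of its two register operands is
zero, so each test fires when the corresponding byte of the loaded word is the
string's terminating NUL.

	l32i	a9, a3, 0		# fetch four source bytes at once
	bnone	a9, a5, .Lbyte0_zero	# (a9 & MASK0) == 0: NUL in byte 0
	bnone	a9, a6, .Lbyte1_zero	# (a9 & MASK1) == 0: NUL in byte 1
	bnone	a9, a7, .Lbyte2_zero	# (a9 & MASK2) == 0: NUL in byte 2
	bnone	a9, a8, .Lbyte3_zero	# (a9 & MASK3) == 0: NUL in byte 3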
diff --git a/arch/xtensa/lib/strnlen_user.S b/arch/xtensa/lib/strnlen_user.S
index 5e9c1e709b2e..ad3f616322ca 100644
--- a/arch/xtensa/lib/strnlen_user.S
+++ b/arch/xtensa/lib/strnlen_user.S
@@ -24,18 +24,18 @@
 /*
  * size_t __strnlen_user(const char *s, size_t len)
  */
-.text
-.begin literal
-.align	4
-.Lmask0:
-	.byte	0xff, 0x00, 0x00, 0x00
-.Lmask1:
-	.byte	0x00, 0xff, 0x00, 0x00
-.Lmask2:
-	.byte	0x00, 0x00, 0xff, 0x00
-.Lmask3:
-	.byte	0x00, 0x00, 0x00, 0xff
-.end literal
+
+#ifdef __XTENSA_EB__
+# define MASK0 0xff000000
+# define MASK1 0x00ff0000
+# define MASK2 0x0000ff00
+# define MASK3 0x000000ff
+#else
+# define MASK0 0x000000ff
+# define MASK1 0x0000ff00
+# define MASK2 0x00ff0000
+# define MASK3 0xff000000
+#endif
 
 # Register use:
 # a2/ src
@@ -48,6 +48,7 @@
 # a9/ tmp
 # a10/ tmp
 
+.text
 .align	4
 .global	__strnlen_user
 .type	__strnlen_user,@function
@@ -56,10 +57,10 @@ __strnlen_user:
 	# a2/ s, a3/ len
 	addi	a4, a2, -4	# because we overincrement at the end;
 				# we compensate with load offsets of 4
-	l32r	a5, .Lmask0	# mask for byte 0
-	l32r	a6, .Lmask1	# mask for byte 1
-	l32r	a7, .Lmask2	# mask for byte 2
-	l32r	a8, .Lmask3	# mask for byte 3
+	movi	a5, MASK0	# mask for byte 0
+	movi	a6, MASK1	# mask for byte 1
+	movi	a7, MASK2	# mask for byte 2
+	movi	a8, MASK3	# mask for byte 3
 	bbsi.l	a2, 0, .L1mod2	# if only  8-bit aligned
 	bbsi.l	a2, 1, .L2mod4	# if only 16-bit aligned
 
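A closing note on why the mask values swap with byte order (a sketch under the
same assumptions as above; '.Lfound' is a hypothetical label): 'l32i' from
address A places the byte at A in bits 31..24 on a big-endian core but in bits
7..0 on a little-endian one, so the mask selecting "the first byte in memory"
must be 0xff000000 under __XTENSA_EB__ and 0x000000ff otherwise.

	movi	a5, MASK0	# 0xff000000 (big-endian) / 0x000000ff (little-endian)
	l32i	a9, a2, 0	# word containing bytes s[0..3]
	bnone	a9, a5, .Lfound	# taken when s[0] == 0, regardless of byte order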