author		Nicolas Pitre <nico@cam.org>	2005-11-11 16:51:49 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2005-11-11 16:51:49 -0500
commit		8adbb3718d6cead304f84f7dd60ad65274df0b15 (patch)
tree		5d39d25a7d2a6e6606c6b2ec4ce23d45cb6b4a2a /arch/arm
parent		a9c4814d8db200052c07d8b68e76c134682c4569 (diff)
[ARM] 3152/1: make various assembly local labels actually local (the rest)
Patch from Nicolas Pitre

For assembly labels to actually be local they must start with ".L", not
merely "."; otherwise they remain visible in the final link, clutter
kallsyms needlessly, and can make symbolic backtraces unclear. This patch
simply inserts an "L" where appropriate. The code itself is unchanged.

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
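[Editor's note: a minimal GNU-as sketch of the behaviour the patch relies on, assuming an ELF target; the label names are made up for illustration. Only ".L"-prefixed labels are assembler-local, so a bare "."-prefixed label still becomes a (local-binding) symbol and survives into the link, kallsyms, and backtraces.]

	.text
	.global	example
example:
.loop_visible:			@ "." alone: a real symbol; survives the
	subs	r0, r0, #1	@ link and shows up in kallsyms/backtraces
	bne	.loop_visible
.Lloop_hidden:			@ ".L": assembler-local; discarded by gas,
	subs	r1, r1, #1	@ never reaches the symbol table
	bne	.Lloop_hidden
	mov	pc, lr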
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/lib/csumpartial.S		28
-rw-r--r--	arch/arm/lib/csumpartialcopygeneric.S	70
-rw-r--r--	arch/arm/lib/delay.S			 4
-rw-r--r--	arch/arm/lib/findbit.S			18
4 files changed, 61 insertions(+), 59 deletions(-)
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S
index fe797cf320b..a78dae5a7b2 100644
--- a/arch/arm/lib/csumpartial.S
+++ b/arch/arm/lib/csumpartial.S
@@ -26,7 +26,7 @@ td1 .req r4 @ save before use
 td2	.req	r5		@ save before use
 td3	.req	lr
 
-.zero:		mov	r0, sum
+.Lzero:	mov	r0, sum
 		add	sp, sp, #4
 		ldr	pc, [sp], #4
 
@@ -34,8 +34,8 @@ td3 .req lr
  * Handle 0 to 7 bytes, with any alignment of source and
  * destination pointers.  Note that when we get here, C = 0
  */
-.less8:		teq	len, #0			@ check for zero count
-		beq	.zero
+.Lless8:	teq	len, #0			@ check for zero count
+		beq	.Lzero
 
 		/* we must have at least one byte. */
 		tst	buf, #1			@ odd address?
@@ -44,12 +44,12 @@ td3 .req lr
 		subne	len, len, #1
 		adcnes	sum, sum, td0, put_byte_1
 
-.less4:		tst	len, #6
-		beq	.less8_byte
+.Lless4:	tst	len, #6
+		beq	.Lless8_byte
 
 		/* we are now half-word aligned */
 
-.less8_wordlp:
+.Lless8_wordlp:
 #if __LINUX_ARM_ARCH__ >= 4
 		ldrh	td0, [buf], #2
 		sub	len, len, #2
@@ -65,19 +65,19 @@ td3 .req lr
 #endif
 		adcs	sum, sum, td0
 		tst	len, #6
-		bne	.less8_wordlp
+		bne	.Lless8_wordlp
 
-.less8_byte:	tst	len, #1			@ odd number of bytes
+.Lless8_byte:	tst	len, #1			@ odd number of bytes
 		ldrneb	td0, [buf], #1		@ include last byte
 		adcnes	sum, sum, td0, put_byte_0	@ update checksum
 
-.done:		adc	r0, sum, #0		@ collect up the last carry
+.Ldone:		adc	r0, sum, #0		@ collect up the last carry
 		ldr	td0, [sp], #4
 		tst	td0, #1			@ check buffer alignment
 		movne	r0, r0, ror #8		@ rotate checksum by 8 bits
 		ldr	pc, [sp], #4		@ return
 
-.not_aligned:	tst	buf, #1			@ odd address
+.Lnot_aligned:	tst	buf, #1			@ odd address
 		ldrneb	td0, [buf], #1		@ make even
 		subne	len, len, #1
 		adcnes	sum, sum, td0, put_byte_1	@ update checksum
@@ -102,14 +102,14 @@ td3 .req lr
 ENTRY(csum_partial)
 		stmfd	sp!, {buf, lr}
 		cmp	len, #8			@ Ensure that we have at least
-		blo	.less8			@ 8 bytes to copy.
+		blo	.Lless8			@ 8 bytes to copy.
 
 		tst	buf, #1
 		movne	sum, sum, ror #8
 
 		adds	sum, sum, #0		@ C = 0
 		tst	buf, #3			@ Test destination alignment
-		blne	.not_aligned		@ aligh destination, return here
+		blne	.Lnot_aligned		@ align destination, return here
 
 1:		bics	ip, len, #31
 		beq	3f
@@ -131,11 +131,11 @@ ENTRY(csum_partial)
 		ldmfd	sp!, {r4 - r5}
 
 3:		tst	len, #0x1c		@ should not change C
-		beq	.less4
+		beq	.Lless4
 
 4:		ldr	td0, [buf], #4
 		sub	len, len, #4
 		adcs	sum, sum, td0
 		tst	len, #0x1c
 		bne	4b
-		b	.less4
+		b	.Lless4
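[Editor's note: the word loop in the hunk above is built around ADC carry chaining, which is what makes the ones'-complement (Internet-checksum) accumulation cheap on ARM. A hedged, annotated sketch of the idiom, using the file's own .req aliases (buf, len, sum, td0); it is a rewrite for illustration, not a drop-in replacement.]

	adds	sum, sum, #0		@ clear C without changing sum
1:	ldr	td0, [buf], #4		@ next 32-bit word
	sub	len, len, #4		@ plain sub: leaves the C flag alone
	adcs	sum, sum, td0		@ sum += word + carry-in; C = carry-out
	tst	len, #0x1c		@ immediate tst: also preserves C here
	bne	1b			@ each carry-out feeds the next adcs
	adc	sum, sum, #0		@ fold the final carry back in

[The one subtlety is flag discipline: only the adcs may touch C inside the loop, which is why the length bookkeeping uses a non-flag-setting sub and an immediate tst.]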
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index d3a2f4667db..4a4609c1909 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -22,7 +22,7 @@ dst .req r1
 len	.req	r2
 sum	.req	r3
 
-.zero:		mov	r0, sum
+.Lzero:		mov	r0, sum
 		load_regs	ea
 
 		/*
@@ -31,8 +31,9 @@ sum .req r3
 		 * the length.  Note that the source pointer hasn't been
 		 * aligned yet.
 		 */
-.dst_unaligned:	tst	dst, #1
-		beq	.dst_16bit
+.Ldst_unaligned:
+		tst	dst, #1
+		beq	.Ldst_16bit
 
 		load1b	ip
 		sub	len, len, #1
@@ -41,7 +42,7 @@ sum .req r3
 		tst	dst, #2
 		moveq	pc, lr			@ dst is now 32bit aligned
 
-.dst_16bit:	load2b	r8, ip
+.Ldst_16bit:	load2b	r8, ip
 		sub	len, len, #2
 		adcs	sum, sum, r8, put_byte_0
 		strb	r8, [dst], #1
@@ -53,12 +54,12 @@ sum .req r3
 		 * Handle 0 to 7 bytes, with any alignment of source and
 		 * destination pointers.  Note that when we get here, C = 0
 		 */
-.less8:		teq	len, #0			@ check for zero count
-		beq	.zero
+.Lless8:	teq	len, #0			@ check for zero count
+		beq	.Lzero
 
 		/* we must have at least one byte. */
 		tst	dst, #1			@ dst 16-bit aligned
-		beq	.less8_aligned
+		beq	.Lless8_aligned
 
 		/* Align dst */
 		load1b	ip
@@ -66,7 +67,7 @@ sum .req r3
 		adcs	sum, sum, ip, put_byte_1	@ update checksum
 		strb	ip, [dst], #1
 		tst	len, #6
-		beq	.less8_byteonly
+		beq	.Lless8_byteonly
 
 1:		load2b	r8, ip
 		sub	len, len, #2
@@ -74,15 +75,16 @@ sum .req r3
 		strb	r8, [dst], #1
 		adcs	sum, sum, ip, put_byte_1
 		strb	ip, [dst], #1
-.less8_aligned:	tst	len, #6
+.Lless8_aligned:
+		tst	len, #6
 		bne	1b
-.less8_byteonly:
+.Lless8_byteonly:
 		tst	len, #1
-		beq	.done
+		beq	.Ldone
 		load1b	r8
 		adcs	sum, sum, r8, put_byte_0	@ update checksum
 		strb	r8, [dst], #1
-		b	.done
+		b	.Ldone
 
 FN_ENTRY
 		mov	ip, sp
@@ -90,11 +92,11 @@ FN_ENTRY
 		sub	fp, ip, #4
 
 		cmp	len, #8			@ Ensure that we have at least
-		blo	.less8			@ 8 bytes to copy.
+		blo	.Lless8			@ 8 bytes to copy.
 
 		adds	sum, sum, #0		@ C = 0
 		tst	dst, #3			@ Test destination alignment
-		blne	.dst_unaligned		@ align destination, return here
+		blne	.Ldst_unaligned		@ align destination, return here
 
 		/*
 		 * Ok, the dst pointer is now 32bit aligned, and we know
@@ -103,7 +105,7 @@ FN_ENTRY
 		 */
 
 		tst	src, #3			@ Test source alignment
-		bne	.src_not_aligned
+		bne	.Lsrc_not_aligned
 
 		/* Routine for src & dst aligned */
 
@@ -136,17 +138,17 @@ FN_ENTRY
 		adcs	sum, sum, r4
 
 4:		ands	len, len, #3
-		beq	.done
+		beq	.Ldone
 		load1l	r4
 		tst	len, #2
 		mov	r5, r4, get_byte_0
-		beq	.exit
+		beq	.Lexit
 		adcs	sum, sum, r4, push #16
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_1
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_2
-.exit:		tst	len, #1
+.Lexit:		tst	len, #1
 		strneb	r5, [dst], #1
 		andne	r5, r5, #255
 		adcnes	sum, sum, r5, put_byte_0
@@ -157,20 +159,20 @@ FN_ENTRY
 		 * the inefficient byte manipulations in the
 		 * architecture independent code.
 		 */
-.done:		adc	r0, sum, #0
+.Ldone:		adc	r0, sum, #0
 		ldr	sum, [sp, #0]		@ dst
 		tst	sum, #1
 		movne	r0, r0, ror #8
 		load_regs	ea
 
-.src_not_aligned:
+.Lsrc_not_aligned:
 		adc	sum, sum, #0		@ include C from dst alignment
 		and	ip, src, #3
 		bic	src, src, #3
 		load1l	r5
 		cmp	ip, #2
-		beq	.src2_aligned
-		bhi	.src3_aligned
+		beq	.Lsrc2_aligned
+		bhi	.Lsrc3_aligned
 		mov	r4, r5, pull #8		@ C = 0
 		bics	ip, len, #15
 		beq	2f
@@ -211,18 +213,18 @@ FN_ENTRY
 		adcs	sum, sum, r4
 		mov	r4, r5, pull #8
 4:		ands	len, len, #3
-		beq	.done
+		beq	.Ldone
 		mov	r5, r4, get_byte_0
 		tst	len, #2
-		beq	.exit
+		beq	.Lexit
 		adcs	sum, sum, r4, push #16
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_1
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_2
-		b	.exit
+		b	.Lexit
 
-.src2_aligned:	mov	r4, r5, pull #16
+.Lsrc2_aligned:	mov	r4, r5, pull #16
 		adds	sum, sum, #0
 		bics	ip, len, #15
 		beq	2f
@@ -263,20 +265,20 @@ FN_ENTRY
 		adcs	sum, sum, r4
 		mov	r4, r5, pull #16
 4:		ands	len, len, #3
-		beq	.done
+		beq	.Ldone
 		mov	r5, r4, get_byte_0
 		tst	len, #2
-		beq	.exit
+		beq	.Lexit
 		adcs	sum, sum, r4
 		strb	r5, [dst], #1
 		mov	r5, r4, get_byte_1
 		strb	r5, [dst], #1
 		tst	len, #1
-		beq	.done
+		beq	.Ldone
 		load1b	r5
-		b	.exit
+		b	.Lexit
 
-.src3_aligned:	mov	r4, r5, pull #24
+.Lsrc3_aligned:	mov	r4, r5, pull #24
 		adds	sum, sum, #0
 		bics	ip, len, #15
 		beq	2f
@@ -317,10 +319,10 @@ FN_ENTRY
 		adcs	sum, sum, r4
 		mov	r4, r5, pull #24
 4:		ands	len, len, #3
-		beq	.done
+		beq	.Ldone
 		mov	r5, r4, get_byte_0
 		tst	len, #2
-		beq	.exit
+		beq	.Lexit
 		strb	r5, [dst], #1
 		adcs	sum, sum, r4
 		load1l	r4
@@ -328,4 +330,4 @@ FN_ENTRY
 		strb	r5, [dst], #1
 		adcs	sum, sum, r4, push #24
 		mov	r5, r4, get_byte_1
-		b	.exit
+		b	.Lexit
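[Editor's note: the .Lsrc_not_aligned paths renamed above all rest on one trick: read only aligned words, then stitch each misaligned word out of two neighbours with the kernel's endian-neutral pull/push shift macros (pull is lsr and push is lsl on little-endian, swapped on big-endian). A hedged sketch of one step for the src & 3 == 1 case, reusing the register roles and macros from the code above; the str stands in for the real adcs/stmia consumption and is not a drop-in replacement.]

	bic	src, src, #3		@ round src down to a word boundary
	load1l	r5			@ first aligned word
	mov	r4, r5, pull #8		@ keep its three usable bytes (LE)
	load1l	r5			@ next aligned word
	orr	r4, r4, r5, push #24	@ splice in one byte: r4 = misaligned word
	str	r4, [dst], #4		@ consume the stitched word ...
	mov	r4, r5, pull #8		@ ... leftover bytes seed the next step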
diff --git a/arch/arm/lib/delay.S b/arch/arm/lib/delay.S
index 3c7f7e675dd..b3fb475b412 100644
--- a/arch/arm/lib/delay.S
+++ b/arch/arm/lib/delay.S
@@ -11,7 +11,7 @@
 #include <asm/assembler.h>
 		.text
 
-LC0:		.word	loops_per_jiffy
+.LC0:		.word	loops_per_jiffy
 
 /*
  * 0 <= r0 <= 2000
@@ -21,7 +21,7 @@ ENTRY(__udelay)
 		orr	r2, r2, #0x00db
 		mul	r0, r2, r0
 ENTRY(__const_udelay)			@ 0 <= r0 <= 0x01ffffff
-		ldr	r2, LC0
+		ldr	r2, .LC0
 		ldr	r2, [r2]		@ max = 0x0fffffff
 		mov	r0, r0, lsr #11		@ max = 0x00003fff
 		mov	r2, r2, lsr #11		@ max = 0x0003ffff
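[Editor's note: LC0 is a literal-pool entry, not a function, so nothing is lost by making it local: the assembler resolves the ldr as a pc-relative load and the name never needs to outlive assembly. A hedged sketch of the pattern with made-up names (.Lptr, some_global, example_read); the .word must sit within the pc-relative ldr range, about 4KB in ARM mode.]

.Lptr:	.word	some_global		@ hypothetical variable's address
ENTRY(example_read)
	ldr	r2, .Lptr		@ pc-relative load of &some_global
	ldr	r0, [r2]		@ dereference to fetch the value
	mov	pc, lr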
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index f055d56ea68..6f8e27a58c7 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -27,7 +27,7 @@ ENTRY(_find_first_zero_bit_le)
 		mov	r2, #0
 1:		ldrb	r3, [r0, r2, lsr #3]
 		eors	r3, r3, #0xff		@ invert bits
-		bne	.found			@ any now set - found zero bit
+		bne	.L_found		@ any now set - found zero bit
 		add	r2, r2, #8		@ next bit pointer
 2:		cmp	r2, r1			@ any more?
 		blo	1b
@@ -46,7 +46,7 @@ ENTRY(_find_next_zero_bit_le)
 		ldrb	r3, [r0, r2, lsr #3]
 		eor	r3, r3, #0xff		@ now looking for a 1 bit
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		bne	.found
+		bne	.L_found
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
@@ -61,7 +61,7 @@ ENTRY(_find_first_bit_le)
 		mov	r2, #0
 1:		ldrb	r3, [r0, r2, lsr #3]
 		movs	r3, r3
-		bne	.found			@ any now set - found zero bit
+		bne	.L_found		@ any now set - found zero bit
 		add	r2, r2, #8		@ next bit pointer
 2:		cmp	r2, r1			@ any more?
 		blo	1b
@@ -79,7 +79,7 @@ ENTRY(_find_next_bit_le)
 		beq	1b			@ If new byte, goto old routine
 		ldrb	r3, [r0, r2, lsr #3]
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		bne	.found
+		bne	.L_found
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
@@ -93,7 +93,7 @@ ENTRY(_find_first_zero_bit_be)
 1:		eor	r3, r2, #0x18		@ big endian byte ordering
 		ldrb	r3, [r0, r3, lsr #3]
 		eors	r3, r3, #0xff		@ invert bits
-		bne	.found			@ any now set - found zero bit
+		bne	.L_found		@ any now set - found zero bit
 		add	r2, r2, #8		@ next bit pointer
 2:		cmp	r2, r1			@ any more?
 		blo	1b
@@ -109,7 +109,7 @@ ENTRY(_find_next_zero_bit_be)
 		ldrb	r3, [r0, r3, lsr #3]
 		eor	r3, r3, #0xff		@ now looking for a 1 bit
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		bne	.found
+		bne	.L_found
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
@@ -121,7 +121,7 @@ ENTRY(_find_first_bit_be)
 1:		eor	r3, r2, #0x18		@ big endian byte ordering
 		ldrb	r3, [r0, r3, lsr #3]
 		movs	r3, r3
-		bne	.found			@ any now set - found zero bit
+		bne	.L_found		@ any now set - found zero bit
 		add	r2, r2, #8		@ next bit pointer
 2:		cmp	r2, r1			@ any more?
 		blo	1b
@@ -136,7 +136,7 @@ ENTRY(_find_next_bit_be)
 		eor	r3, r2, #0x18		@ big endian byte ordering
 		ldrb	r3, [r0, r3, lsr #3]
 		movs	r3, r3, lsr ip		@ shift off unused bits
-		bne	.found
+		bne	.L_found
 		orr	r2, r2, #7		@ if zero, then no bits here
 		add	r2, r2, #1		@ align bit pointer
 		b	2b			@ loop for next bit
@@ -146,7 +146,7 @@ ENTRY(_find_next_bit_be)
 /*
  * One or more bits in the LSB of r3 are assumed to be set.
  */
-.found:
+.L_found:
 #if __LINUX_ARM_ARCH__ >= 5
 		rsb	r1, r3, #0
 		and	r3, r3, r1
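[Editor's note: the rsb/and pair visible in the last hunk is the classic lowest-set-bit isolation, r3 & -r3. On ARMv5+ the routine then turns that single bit into an index with clz. The continuation below is a hedged sketch reconstructing the rest of the sequence, assuming r3 != 0 on entry and r2 holding the bit offset scanned so far; treat it as illustrative rather than the file's exact code.]

	rsb	r1, r3, #0	@ r1 = -r3 (two's complement negate)
	and	r3, r3, r1	@ r3 &= -r3: only the lowest set bit survives
	clz	r3, r3		@ count leading zeros of that single bit
	rsb	r3, r3, #31	@ 31 - clz = index of the lowest set bit
	add	r0, r2, r3	@ add the bit offset scanned so far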