path: root/arch/sparc/lib
author    David S. Miller <davem@davemloft.net>    2011-10-19 18:15:58 -0400
committer David S. Miller <davem@davemloft.net>    2011-10-20 18:17:22 -0400
commit    045b7de9ca0cf09f1adc3efa467f668b89238390 (patch)
tree      df3fdd26cb3eb4b3259a76cb8091b0763f779570 /arch/sparc/lib
parent    fd11e153b82ad1c84ccc71ba1cfedc222465198c (diff)
sparc32: Remove non-kernel code from memcpy implementation.
Signed-off-by: David S. Miller <davem@davemloft.net>
Tested-by: Kjetil Oftedal <oftedal@gmail.com>
Diffstat (limited to 'arch/sparc/lib')
-rw-r--r--  arch/sparc/lib/memcpy.S  607
1 file changed, 2 insertions, 605 deletions
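
The patch drops the dual-build scaffolding that once let this single source file serve both the kernel and libc. A rough sketch of the structure being removed, pieced together from the hunks below (a paraphrase, not the literal file text):

	#ifdef __KERNEL__
	/* kernel build: the return value comes from macros in asm/string.h,
	 * so the asm just clears %o0 (RETL_INSN) on the way out */
	#undef  FASTER_REVERSE
	#undef  FASTER_NONALIGNED
	#define FASTER_ALIGNED
	#define SETUP_RETL
	#define RETL_INSN	clr %o0
	#else
	/* libc build: memcpy() itself must return dst, so it is saved in %g6 */
	#include "DEFS.h"
	#define FASTER_REVERSE
	#define FASTER_NONALIGNED
	#define FASTER_ALIGNED
	#define SETUP_RETL	mov %o0, %g6
	#define RETL_INSN	mov %g6, %o0
	#endif

With the libc half gone, everything guarded by FASTER_REVERSE and FASTER_NONALIGNED is dead code in a kernel build, which accounts for the bulk of the 605 deleted lines.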
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
index 34fe65751737..6a8ef5d8daf0 100644
--- a/arch/sparc/lib/memcpy.S
+++ b/arch/sparc/lib/memcpy.S
@@ -7,17 +7,12 @@
7 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 7 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 */ 8 */
9 9
10#ifdef __KERNEL__ 10#define FUNC(x) \
11
12#define FUNC(x) \
13 .globl x; \ 11 .globl x; \
14 .type x,@function; \ 12 .type x,@function; \
15 .align 4; \ 13 .align 4; \
16x: 14x:
17 15
18#undef FASTER_REVERSE
19#undef FASTER_NONALIGNED
20#define FASTER_ALIGNED
21 16
22/* In kernel these functions don't return a value. 17/* In kernel these functions don't return a value.
23 * One should use macros in asm/string.h for that purpose. 18 * One should use macros in asm/string.h for that purpose.
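
(Aside, not part of the patch: the comment above is why RETL_INSN can be a plain "clr %o0" in the kernel. A hypothetical wrapper of the general shape asm/string.h uses, shown only to illustrate the calling convention and not the real header text, would be:

	/* Hypothetical illustration, not the actual asm/string.h: the C macro
	 * returns dst itself, so the asm routine never needs to. */
	extern void __memcpy(void *dst, const void *src, unsigned long n);

	#define memcpy(dst, src, n)		\
	({					\
		void *__d = (dst);		\
		__memcpy(__d, (src), (n));	\
		__d;				\
	})

so the assembly routine never has to preserve the destination pointer for its caller.)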
@@ -26,21 +21,6 @@ x:
26#define SETUP_RETL 21#define SETUP_RETL
27#define RETL_INSN clr %o0 22#define RETL_INSN clr %o0
28 23
29#else
30
31/* libc */
32
33#include "DEFS.h"
34
35#define FASTER_REVERSE
36#define FASTER_NONALIGNED
37#define FASTER_ALIGNED
38
39#define SETUP_RETL mov %o0, %g6
40#define RETL_INSN mov %g6, %o0
41
42#endif
43
44/* Both these macros have to start with exactly the same insn */ 24/* Both these macros have to start with exactly the same insn */
45#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ 25#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
46 ldd [%src + (offset) + 0x00], %t0; \ 26 ldd [%src + (offset) + 0x00], %t0; \
@@ -164,30 +144,6 @@ x:
164 .text 144 .text
165 .align 4 145 .align 4
166 146
167#ifdef FASTER_REVERSE
168
16970: /* rdword_align */
170
171 andcc %o1, 1, %g0
172 be 4f
173 andcc %o1, 2, %g0
174
175 ldub [%o1 - 1], %g2
176 sub %o1, 1, %o1
177 stb %g2, [%o0 - 1]
178 sub %o2, 1, %o2
179 be 3f
180 sub %o0, 1, %o0
1814:
182 lduh [%o1 - 2], %g2
183 sub %o1, 2, %o1
184 sth %g2, [%o0 - 2]
185 sub %o2, 2, %o2
186 b 3f
187 sub %o0, 2, %o0
188
189#endif /* FASTER_REVERSE */
190
1910: 1470:
192 retl 148 retl
193 nop ! Only bcopy returns here and it retuns void... 149 nop ! Only bcopy returns here and it retuns void...
@@ -207,8 +163,6 @@ FUNC(memmove)
207 bleu 0f 163 bleu 0f
208 andcc %o4, 3, %o5 164 andcc %o4, 3, %o5
209 165
210#ifndef FASTER_REVERSE
211
212 add %o1, %o2, %o1 166 add %o1, %o2, %o1
213 add %o0, %o2, %o0 167 add %o0, %o2, %o0
214 sub %o1, 1, %o1 168 sub %o1, 1, %o1
@@ -226,294 +180,6 @@ FUNC(memmove)
226 retl 180 retl
227 RETL_INSN 181 RETL_INSN
228 182
229#else /* FASTER_REVERSE */
230
231 add %o1, %o2, %o1
232 add %o0, %o2, %o0
233 bne 77f
234 cmp %o2, 15
235 bleu 91f
236 andcc %o1, 3, %g0
237 bne 70b
2383:
239 andcc %o1, 4, %g0
240
241 be 2f
242 mov %o2, %g1
243
244 ld [%o1 - 4], %o4
245 sub %g1, 4, %g1
246 st %o4, [%o0 - 4]
247 sub %o1, 4, %o1
248 sub %o0, 4, %o0
2492:
250 andcc %g1, 0xffffff80, %g7
251 be 3f
252 andcc %o0, 4, %g0
253
254 be 74f + 4
2555:
256 RMOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
257 RMOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
258 RMOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
259 RMOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
260 subcc %g7, 128, %g7
261 sub %o1, 128, %o1
262 bne 5b
263 sub %o0, 128, %o0
2643:
265 andcc %g1, 0x70, %g7
266 be 72f
267 andcc %g1, 8, %g0
268
269 sethi %hi(72f), %o5
270 srl %g7, 1, %o4
271 add %g7, %o4, %o4
272 sub %o1, %g7, %o1
273 sub %o5, %o4, %o5
274 jmpl %o5 + %lo(72f), %g0
275 sub %o0, %g7, %o0
276
27771: /* rmemcpy_table */
278 RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
279 RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
280 RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
281 RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
282 RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
283 RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
284 RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
285
28672: /* rmemcpy_table_end */
287
288 be 73f
289 andcc %g1, 4, %g0
290
291 ldd [%o1 - 0x08], %g2
292 sub %o0, 8, %o0
293 sub %o1, 8, %o1
294 st %g2, [%o0]
295 st %g3, [%o0 + 0x04]
296
29773: /* rmemcpy_last7 */
298
299 be 1f
300 andcc %g1, 2, %g0
301
302 ld [%o1 - 4], %g2
303 sub %o1, 4, %o1
304 st %g2, [%o0 - 4]
305 sub %o0, 4, %o0
3061:
307 be 1f
308 andcc %g1, 1, %g0
309
310 lduh [%o1 - 2], %g2
311 sub %o1, 2, %o1
312 sth %g2, [%o0 - 2]
313 sub %o0, 2, %o0
3141:
315 be 1f
316 nop
317
318 ldub [%o1 - 1], %g2
319 stb %g2, [%o0 - 1]
3201:
321 retl
322 RETL_INSN
323
32474: /* rldd_std */
325 RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
326 RMOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
327 RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
328 RMOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
329 subcc %g7, 128, %g7
330 sub %o1, 128, %o1
331 bne 74b
332 sub %o0, 128, %o0
333
334 andcc %g1, 0x70, %g7
335 be 72b
336 andcc %g1, 8, %g0
337
338 sethi %hi(72b), %o5
339 srl %g7, 1, %o4
340 add %g7, %o4, %o4
341 sub %o1, %g7, %o1
342 sub %o5, %o4, %o5
343 jmpl %o5 + %lo(72b), %g0
344 sub %o0, %g7, %o0
345
34675: /* rshort_end */
347
348 and %o2, 0xe, %o3
3492:
350 sethi %hi(76f), %o5
351 sll %o3, 3, %o4
352 sub %o0, %o3, %o0
353 sub %o5, %o4, %o5
354 sub %o1, %o3, %o1
355 jmpl %o5 + %lo(76f), %g0
356 andcc %o2, 1, %g0
357
358 RMOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
359 RMOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
360 RMOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
361 RMOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
362 RMOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
363 RMOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
364 RMOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
365
36676: /* rshort_table_end */
367
368 be 1f
369 nop
370 ldub [%o1 - 1], %g2
371 stb %g2, [%o0 - 1]
3721:
373 retl
374 RETL_INSN
375
37691: /* rshort_aligned_end */
377
378 bne 75b
379 andcc %o2, 8, %g0
380
381 be 1f
382 andcc %o2, 4, %g0
383
384 ld [%o1 - 0x08], %g2
385 ld [%o1 - 0x04], %g3
386 sub %o1, 8, %o1
387 st %g2, [%o0 - 0x08]
388 st %g3, [%o0 - 0x04]
389 sub %o0, 8, %o0
3901:
391 b 73b
392 mov %o2, %g1
393
39477: /* rnon_aligned */
395 cmp %o2, 15
396 bleu 75b
397 andcc %o0, 3, %g0
398 be 64f
399 andcc %o0, 1, %g0
400 be 63f
401 andcc %o0, 2, %g0
402 ldub [%o1 - 1], %g5
403 sub %o1, 1, %o1
404 stb %g5, [%o0 - 1]
405 sub %o0, 1, %o0
406 be 64f
407 sub %o2, 1, %o2
40863:
409 ldub [%o1 - 1], %g5
410 sub %o1, 2, %o1
411 stb %g5, [%o0 - 1]
412 sub %o0, 2, %o0
413 ldub [%o1], %g5
414 sub %o2, 2, %o2
415 stb %g5, [%o0]
41664:
417 and %o1, 3, %g2
418 and %o1, -4, %o1
419 and %o2, 0xc, %g3
420 add %o1, 4, %o1
421 cmp %g3, 4
422 sll %g2, 3, %g4
423 mov 32, %g2
424 be 4f
425 sub %g2, %g4, %g7
426
427 blu 3f
428 cmp %g3, 8
429
430 be 2f
431 srl %o2, 2, %g3
432
433 ld [%o1 - 4], %o3
434 add %o0, -8, %o0
435 ld [%o1 - 8], %o4
436 add %o1, -16, %o1
437 b 7f
438 add %g3, 1, %g3
4392:
440 ld [%o1 - 4], %o4
441 add %o0, -4, %o0
442 ld [%o1 - 8], %g1
443 add %o1, -12, %o1
444 b 8f
445 add %g3, 2, %g3
4463:
447 ld [%o1 - 4], %o5
448 add %o0, -12, %o0
449 ld [%o1 - 8], %o3
450 add %o1, -20, %o1
451 b 6f
452 srl %o2, 2, %g3
4534:
454 ld [%o1 - 4], %g1
455 srl %o2, 2, %g3
456 ld [%o1 - 8], %o5
457 add %o1, -24, %o1
458 add %o0, -16, %o0
459 add %g3, -1, %g3
460
461 ld [%o1 + 12], %o3
4625:
463 sll %o5, %g4, %g2
464 srl %g1, %g7, %g5
465 or %g2, %g5, %g2
466 st %g2, [%o0 + 12]
4676:
468 ld [%o1 + 8], %o4
469 sll %o3, %g4, %g2
470 srl %o5, %g7, %g5
471 or %g2, %g5, %g2
472 st %g2, [%o0 + 8]
4737:
474 ld [%o1 + 4], %g1
475 sll %o4, %g4, %g2
476 srl %o3, %g7, %g5
477 or %g2, %g5, %g2
478 st %g2, [%o0 + 4]
4798:
480 ld [%o1], %o5
481 sll %g1, %g4, %g2
482 srl %o4, %g7, %g5
483 addcc %g3, -4, %g3
484 or %g2, %g5, %g2
485 add %o1, -16, %o1
486 st %g2, [%o0]
487 add %o0, -16, %o0
488 bne,a 5b
489 ld [%o1 + 12], %o3
490 sll %o5, %g4, %g2
491 srl %g1, %g7, %g5
492 srl %g4, 3, %g3
493 or %g2, %g5, %g2
494 add %o1, %g3, %o1
495 andcc %o2, 2, %g0
496 st %g2, [%o0 + 12]
497 be 1f
498 andcc %o2, 1, %g0
499
500 ldub [%o1 + 15], %g5
501 add %o1, -2, %o1
502 stb %g5, [%o0 + 11]
503 add %o0, -2, %o0
504 ldub [%o1 + 16], %g5
505 stb %g5, [%o0 + 12]
5061:
507 be 1f
508 nop
509 ldub [%o1 + 15], %g5
510 stb %g5, [%o0 + 11]
5111:
512 retl
513 RETL_INSN
514
515#endif /* FASTER_REVERSE */
516
517/* NOTE: This code is executed just for the cases, 183/* NOTE: This code is executed just for the cases,
518 where %src (=%o1) & 3 is != 0. 184 where %src (=%o1) & 3 is != 0.
519 We need to align it to 4. So, for (%src & 3) 185 We need to align it to 4. So, for (%src & 3)
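
(Aside, not part of the patch: the fixup this comment describes copies just enough leading bytes to bring the source pointer onto a 4-byte boundary before switching to word loads; in C terms it amounts to:

	/* Illustration only, not kernel code: (src & 3) == 1 needs 3 head
	 * bytes, == 2 needs 2, == 3 needs 1. */
	#include <stddef.h>
	#include <stdint.h>

	static size_t head_bytes_to_align4(const void *src)
	{
		size_t mis = (uintptr_t)src & 3;

		return mis ? 4 - mis : 0;
	}

after which the remaining length is handled by the aligned paths.)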
@@ -653,22 +319,6 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
653 bne 82b 319 bne 82b
654 add %o0, 128, %o0 320 add %o0, 128, %o0
655 321
656#ifndef FASTER_ALIGNED
657
658 andcc %g1, 0x70, %g7
659 be 80b
660 andcc %g1, 8, %g0
661
662 sethi %hi(80b), %o5
663 srl %g7, 1, %o4
664 add %g7, %o4, %o4
665 add %o1, %g7, %o1
666 sub %o5, %o4, %o5
667 jmpl %o5 + %lo(80b), %g0
668 add %o0, %g7, %o0
669
670#else /* FASTER_ALIGNED */
671
672 andcc %g1, 0x70, %g7 322 andcc %g1, 0x70, %g7
673 be 84f 323 be 84f
674 andcc %g1, 8, %g0 324 andcc %g1, 8, %g0
@@ -723,19 +373,9 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
723 retl 373 retl
724 RETL_INSN 374 RETL_INSN
725 375
726#endif /* FASTER_ALIGNED */
727
72886: /* non_aligned */ 37686: /* non_aligned */
729 cmp %o2, 6 377 cmp %o2, 6
730 bleu 88f 378 bleu 88f
731
732#ifdef FASTER_NONALIGNED
733
734 cmp %o2, 256
735 bcc 87f
736
737#endif /* FASTER_NONALIGNED */
738
739 andcc %o0, 3, %g0 379 andcc %o0, 3, %g0
740 be 61f 380 be 61f
741 andcc %o0, 1, %g0 381 andcc %o0, 1, %g0
@@ -855,249 +495,6 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */
855 retl 495 retl
856 RETL_INSN 496 RETL_INSN
857 497
858#ifdef FASTER_NONALIGNED
859
86087: /* faster_nonaligned */
861
862 andcc %o1, 3, %g0
863 be 3f
864 andcc %o1, 1, %g0
865
866 be 4f
867 andcc %o1, 2, %g0
868
869 ldub [%o1], %g2
870 add %o1, 1, %o1
871 stb %g2, [%o0]
872 sub %o2, 1, %o2
873 bne 3f
874 add %o0, 1, %o0
8754:
876 lduh [%o1], %g2
877 add %o1, 2, %o1
878 srl %g2, 8, %g3
879 sub %o2, 2, %o2
880 stb %g3, [%o0]
881 add %o0, 2, %o0
882 stb %g2, [%o0 - 1]
8833:
884 andcc %o1, 4, %g0
885
886 bne 2f
887 cmp %o5, 1
888
889 ld [%o1], %o4
890 srl %o4, 24, %g2
891 stb %g2, [%o0]
892 srl %o4, 16, %g3
893 stb %g3, [%o0 + 1]
894 srl %o4, 8, %g2
895 stb %g2, [%o0 + 2]
896 sub %o2, 4, %o2
897 stb %o4, [%o0 + 3]
898 add %o1, 4, %o1
899 add %o0, 4, %o0
9002:
901 be 33f
902 cmp %o5, 2
903 be 32f
904 sub %o2, 4, %o2
90531:
906 ld [%o1], %g2
907 add %o1, 4, %o1
908 srl %g2, 24, %g3
909 and %o0, 7, %g5
910 stb %g3, [%o0]
911 cmp %g5, 7
912 sll %g2, 8, %g1
913 add %o0, 4, %o0
914 be 41f
915 and %o2, 0xffffffc0, %o3
916 ld [%o0 - 7], %o4
9174:
918 SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
919 SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
920 SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
921 SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
922 subcc %o3, 64, %o3
923 add %o1, 64, %o1
924 bne 4b
925 add %o0, 64, %o0
926
927 andcc %o2, 0x30, %o3
928 be,a 1f
929 srl %g1, 16, %g2
9304:
931 SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
932 subcc %o3, 16, %o3
933 add %o1, 16, %o1
934 bne 4b
935 add %o0, 16, %o0
936
937 srl %g1, 16, %g2
9381:
939 st %o4, [%o0 - 7]
940 sth %g2, [%o0 - 3]
941 srl %g1, 8, %g4
942 b 88f
943 stb %g4, [%o0 - 1]
94432:
945 ld [%o1], %g2
946 add %o1, 4, %o1
947 srl %g2, 16, %g3
948 and %o0, 7, %g5
949 sth %g3, [%o0]
950 cmp %g5, 6
951 sll %g2, 16, %g1
952 add %o0, 4, %o0
953 be 42f
954 and %o2, 0xffffffc0, %o3
955 ld [%o0 - 6], %o4
9564:
957 SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
958 SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
959 SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
960 SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
961 subcc %o3, 64, %o3
962 add %o1, 64, %o1
963 bne 4b
964 add %o0, 64, %o0
965
966 andcc %o2, 0x30, %o3
967 be,a 1f
968 srl %g1, 16, %g2
9694:
970 SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
971 subcc %o3, 16, %o3
972 add %o1, 16, %o1
973 bne 4b
974 add %o0, 16, %o0
975
976 srl %g1, 16, %g2
9771:
978 st %o4, [%o0 - 6]
979 b 88f
980 sth %g2, [%o0 - 2]
98133:
982 ld [%o1], %g2
983 sub %o2, 4, %o2
984 srl %g2, 24, %g3
985 and %o0, 7, %g5
986 stb %g3, [%o0]
987 cmp %g5, 5
988 srl %g2, 8, %g4
989 sll %g2, 24, %g1
990 sth %g4, [%o0 + 1]
991 add %o1, 4, %o1
992 be 43f
993 and %o2, 0xffffffc0, %o3
994
995 ld [%o0 - 1], %o4
996 add %o0, 4, %o0
9974:
998 SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
999 SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1000 SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1001 SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1002 subcc %o3, 64, %o3
1003 add %o1, 64, %o1
1004 bne 4b
1005 add %o0, 64, %o0
1006
1007 andcc %o2, 0x30, %o3
1008 be,a 1f
1009 srl %g1, 24, %g2
10104:
1011 SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1)
1012 subcc %o3, 16, %o3
1013 add %o1, 16, %o1
1014 bne 4b
1015 add %o0, 16, %o0
1016
1017 srl %g1, 24, %g2
10181:
1019 st %o4, [%o0 - 5]
1020 b 88f
1021 stb %g2, [%o0 - 1]
102241:
1023 SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1024 SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1025 SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1026 SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1027 subcc %o3, 64, %o3
1028 add %o1, 64, %o1
1029 bne 41b
1030 add %o0, 64, %o0
1031
1032 andcc %o2, 0x30, %o3
1033 be,a 1f
1034 srl %g1, 16, %g2
10354:
1036 SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3)
1037 subcc %o3, 16, %o3
1038 add %o1, 16, %o1
1039 bne 4b
1040 add %o0, 16, %o0
1041
1042 srl %g1, 16, %g2
10431:
1044 sth %g2, [%o0 - 3]
1045 srl %g1, 8, %g4
1046 b 88f
1047 stb %g4, [%o0 - 1]
104843:
1049 SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1050 SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1051 SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1052 SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1053 subcc %o3, 64, %o3
1054 add %o1, 64, %o1
1055 bne 43b
1056 add %o0, 64, %o0
1057
1058 andcc %o2, 0x30, %o3
1059 be,a 1f
1060 srl %g1, 24, %g2
10614:
1062 SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3)
1063 subcc %o3, 16, %o3
1064 add %o1, 16, %o1
1065 bne 4b
1066 add %o0, 16, %o0
1067
1068 srl %g1, 24, %g2
10691:
1070 stb %g2, [%o0 + 3]
1071 b 88f
1072 add %o0, 4, %o0
107342:
1074 SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1075 SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1076 SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1077 SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1078 subcc %o3, 64, %o3
1079 add %o1, 64, %o1
1080 bne 42b
1081 add %o0, 64, %o0
1082
1083 andcc %o2, 0x30, %o3
1084 be,a 1f
1085 srl %g1, 16, %g2
10864:
1087 SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2)
1088 subcc %o3, 16, %o3
1089 add %o1, 16, %o1
1090 bne 4b
1091 add %o0, 16, %o0
1092
1093 srl %g1, 16, %g2
10941:
1095 sth %g2, [%o0 - 2]
1096
1097 /* Fall through */
1098
1099#endif /* FASTER_NONALIGNED */
1100
110188: /* short_end */ 49888: /* short_end */
1102 499
1103 and %o2, 0xe, %o3 500 and %o2, 0xe, %o3