author     Paul Mackerras <paulus@samba.org>    2005-10-10 07:52:43 -0400
committer  Paul Mackerras <paulus@samba.org>    2005-10-10 07:52:43 -0400
commit     70d64ceaa1a84d2502405422a4dfd3f87786a347 (patch)
tree       23e38168021988d34b11c6f41cfff82f8095092e /arch/powerpc/lib/memcpy_64.S
parent     a432403a89646614252c3bb6dfbe897c8312ab35 (diff)
powerpc: Rename files to have consistent _32/_64 suffixes
This doesn't change any code, just renames things so we consistently
have foo_32.c and foo_64.c where we have separate 32- and 64-bit
versions.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/lib/memcpy_64.S')
-rw-r--r--   arch/powerpc/lib/memcpy_64.S   172
1 files changed, 172 insertions, 0 deletions
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
new file mode 100644
index 000000000000..9ccacdf5bcb9
--- /dev/null
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -0,0 +1,172 @@
/*
 * arch/ppc64/lib/memcpy.S
 *
 * Copyright (C) 2002 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>

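# Arguments follow the standard PPC64 calling convention:
# r3 = destination, r4 = source, r5 = byte count.
# mtcrf 0x01,r5 below copies the low 4 bits of the length into cr7;
# those bits steer the 8/4/2/1-byte tail and short-copy cases.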
        .align  7
_GLOBAL(memcpy)
        mtcrf   0x01,r5
        cmpldi  cr1,r5,16
        neg     r6,r3           # LS 3 bits = # bytes to 8-byte dest bdry
        andi.   r6,r6,7
        dcbt    0,r4
        blt     cr1,.Lshort_copy
        bne     .Ldst_unaligned
.Ldst_aligned:
        andi.   r0,r4,7
        addi    r3,r3,-16
        bne     .Lsrc_unaligned
        srdi    r7,r5,4
        ld      r9,0(r4)
        addi    r4,r4,-8
        mtctr   r7
        andi.   r5,r5,7
        bf      cr7*4+0,2f
        addi    r3,r3,8
        addi    r4,r4,8
        mr      r8,r9
        blt     cr1,3f
1:      ld      r9,8(r4)
        std     r8,8(r3)
2:      ldu     r8,16(r4)
        stdu    r9,16(r3)
        bdnz    1b
3:      std     r8,8(r3)
        beqlr
        addi    r3,r3,16
        ld      r9,8(r4)
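        # Store the final 1-7 bytes from r9: cr7 bits 1-3 hold bits 2-0
        # of the length (4-, 2- and 1-byte pieces respectively).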
.Ldo_tail:
        bf      cr7*4+1,1f
        rotldi  r9,r9,32
        stw     r9,0(r3)
        addi    r3,r3,4
1:      bf      cr7*4+2,2f
        rotldi  r9,r9,16
        sth     r9,0(r3)
        addi    r3,r3,2
2:      bf      cr7*4+3,3f
        rotldi  r9,r9,8
        stb     r9,0(r3)
3:      blr

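        # Source is not doubleword-aligned relative to the destination:
        # load only aligned doublewords from the source and merge each
        # adjacent pair with sld/srd (r10 = misalignment in bits,
        # r11 = 64 - r10).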
.Lsrc_unaligned:
        srdi    r6,r5,3
        addi    r5,r5,-16
        subf    r4,r0,r4
        srdi    r7,r5,4
        sldi    r10,r0,3
        cmpdi   cr6,r6,3
        andi.   r5,r5,7
        mtctr   r7
        subfic  r11,r10,64
        add     r5,r5,r0

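        # cr7 bit 0 is bit 3 of the original length: with an odd number
        # of doublewords to move, enter through the second prologue so
        # the 16-byte-per-iteration loop below comes out even.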
        bt      cr7*4+0,0f

        ld      r9,0(r4)        # 3+2n loads, 2+2n stores
        ld      r0,8(r4)
        sld     r6,r9,r10
        ldu     r9,16(r4)
        srd     r7,r0,r11
        sld     r8,r0,r10
        or      r7,r7,r6
        blt     cr6,4f
        ld      r0,8(r4)
        # s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
        b       2f

0:      ld      r0,0(r4)        # 4+2n loads, 3+2n stores
        ldu     r9,8(r4)
        sld     r8,r0,r10
        addi    r3,r3,-8
        blt     cr6,5f
        ld      r0,8(r4)
        srd     r12,r9,r11
        sld     r6,r9,r10
        ldu     r9,16(r4)
        or      r12,r8,r12
        srd     r7,r0,r11
        sld     r8,r0,r10
        addi    r3,r3,16
        beq     cr6,3f

        # d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1:      or      r7,r7,r6
        ld      r0,8(r4)
        std     r12,8(r3)
2:      srd     r12,r9,r11
        sld     r6,r9,r10
        ldu     r9,16(r4)
        or      r12,r8,r12
        stdu    r7,16(r3)
        srd     r7,r0,r11
        sld     r8,r0,r10
        bdnz    1b

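        # Loop finished: drain the doublewords still held in registers,
        # then merge whatever tail bytes remain and finish in .Ldo_tail.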
3:      std     r12,8(r3)
        or      r7,r7,r6
4:      std     r7,16(r3)
5:      srd     r12,r9,r11
        or      r12,r8,r12
        std     r12,24(r3)
        beqlr
        cmpwi   cr1,r5,8
        addi    r3,r3,32
        sld     r9,r9,r10
        ble     cr1,.Ldo_tail
        ld      r0,8(r4)
        srd     r7,r0,r11
        or      r9,r7,r9
        b       .Ldo_tail

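        # Destination is not 8-byte aligned: copy 1-7 bytes (cr7 holds
        # the count computed by the neg/andi. at entry), then rejoin
        # the aligned path.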
.Ldst_unaligned:
        mtcrf   0x01,r6         # put #bytes to 8B bdry into cr7
        subf    r5,r6,r5
        li      r7,0
        cmpldi  cr1,r5,16
        bf      cr7*4+3,1f
        lbz     r0,0(r4)
        stb     r0,0(r3)
        addi    r7,r7,1
1:      bf      cr7*4+2,2f
        lhzx    r0,r7,r4
        sthx    r0,r7,r3
        addi    r7,r7,2
2:      bf      cr7*4+1,3f
        lwzx    r0,r7,r4
        stwx    r0,r7,r3
3:      mtcrf   0x01,r5
        add     r4,r6,r4
        add     r3,r6,r3
        b       .Ldst_aligned

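        # Copies of fewer than 16 bytes come straight here; cr7 holds
        # the low 4 bits of the length (8-, 4-, 2- and 1-byte pieces).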
.Lshort_copy:
        bf      cr7*4+0,1f
        lwz     r0,0(r4)
        lwz     r9,4(r4)
        addi    r4,r4,8
        stw     r0,0(r3)
        stw     r9,4(r3)
        addi    r3,r3,8
1:      bf      cr7*4+1,2f
        lwz     r0,0(r4)
        addi    r4,r4,4
        stw     r0,0(r3)
        addi    r3,r3,4
2:      bf      cr7*4+2,3f
        lhz     r0,0(r4)
        addi    r4,r4,2
        sth     r0,0(r3)
        addi    r3,r3,2
3:      bf      cr7*4+3,4f
        lbz     r0,0(r4)
        stb     r0,0(r3)
4:      blr
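
For readers less fluent in PPC64 assembler, the following is a minimal C sketch of the shift-and-merge technique used by the .Lsrc_unaligned path above. It is an illustration under stated assumptions, not kernel code: merge_copy and its signature are invented, head/tail handling is omitted, and big-endian byte order is assumed (as on the POWER machines this code targeted).

#include <stdint.h>
#include <stddef.h>

/*
 * Hypothetical illustration of the .Lsrc_unaligned idea.  dst is
 * 8-byte aligned, src is not (off != 0; the aligned case takes a
 * different path), and ndw doublewords are copied.  Only naturally
 * aligned doublewords are loaded from src; each output doubleword is
 * merged from two inputs with shifts, exactly what the sld
 * (prev << lsh), srd (cur >> rsh) and or instructions do with
 * r10 = off*8 and r11 = 64 - r10.  Big-endian byte order assumed.
 */
static void merge_copy(uint64_t *dst, const unsigned char *src, size_t ndw)
{
        unsigned off = (uintptr_t)src & 7;              /* source misalignment, 1-7 */
        const uint64_t *s = (const uint64_t *)(src - off); /* round down to aligned */
        unsigned lsh = off * 8, rsh = 64 - lsh;         /* bit shifts, like r10/r11 */
        uint64_t prev = *s++;

        for (size_t i = 0; i < ndw; i++) {
                uint64_t cur = *s++;
                dst[i] = (prev << lsh) | (cur >> rsh);  /* sld + srd + or */
                prev = cur;
        }
}

The two load prologues in the assembly software-pipeline this loop: loads are issued an iteration ahead of the stores that consume them, which the C version approximates by carrying prev across iterations the way the assembly carries merged data in r8/r9.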