diff options
author | Kyle McMartin <kyle@mcmartin.ca> | 2008-07-28 23:02:13 -0400 |
---|---|---|
committer | Kyle McMartin <kyle@hera.kernel.org> | 2008-10-10 12:32:29 -0400 |
commit | deae26bf6a10e47983606f5df080b91e97650ead (patch) | |
tree | 84a8a68145d0f713d7c5a1f9e6b3b03be9b3a4c8 /include/asm-parisc/checksum.h | |
parent | 6c86cb8237bf08443806089130dc108051569a93 (diff) |
parisc: move include/asm-parisc to arch/parisc/include/asm
Diffstat (limited to 'include/asm-parisc/checksum.h')
-rw-r--r-- | include/asm-parisc/checksum.h | 210 |
1 files changed, 0 insertions, 210 deletions
diff --git a/include/asm-parisc/checksum.h b/include/asm-parisc/checksum.h deleted file mode 100644 index e9639ccc3fce..000000000000 --- a/include/asm-parisc/checksum.h +++ /dev/null | |||
@@ -1,210 +0,0 @@ | |||
1 | #ifndef _PARISC_CHECKSUM_H | ||
2 | #define _PARISC_CHECKSUM_H | ||
3 | |||
4 | #include <linux/in6.h> | ||
5 | |||
6 | /* | ||
7 | * computes the checksum of a memory block at buff, length len, | ||
8 | * and adds in "sum" (32-bit) | ||
9 | * | ||
10 | * returns a 32-bit number suitable for feeding into itself | ||
11 | * or csum_tcpudp_magic | ||
12 | * | ||
13 | * this function must be called with even lengths, except | ||
14 | * for the last fragment, which may be odd | ||
15 | * | ||
16 | * it's best to have buff aligned on a 32-bit boundary | ||
17 | */ | ||
18 | extern __wsum csum_partial(const void *, int, __wsum); | ||
19 | |||
20 | /* | ||
21 | * The same as csum_partial, but copies from src while it checksums. | ||
22 | * | ||
23 | * Here even more important to align src and dst on a 32-bit (or even | ||
24 | * better 64-bit) boundary | ||
25 | */ | ||
26 | extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum); | ||
27 | |||
28 | /* | ||
29 | * this is a new version of the above that records errors it finds in *errp, | ||
30 | * but continues and zeros the rest of the buffer. | ||
31 | */ | ||
32 | extern __wsum csum_partial_copy_from_user(const void __user *src, | ||
33 | void *dst, int len, __wsum sum, int *errp); | ||
34 | |||
35 | /* | ||
36 | * Optimized for IP headers, which always checksum on 4 octet boundaries. | ||
37 | * | ||
38 | * Written by Randolph Chung <tausq@debian.org>, and then mucked with by | ||
39 | * LaMont Jones <lamont@debian.org> | ||
40 | */ | ||
/*
 * Optimized for IP headers, which always checksum on 4 octet boundaries.
 *
 * Written by Randolph Chung <tausq@debian.org>, and then mucked with by
 * LaMont Jones <lamont@debian.org>
 *
 * Computes the one's-complement checksum of the IP header at @iph,
 * whose length is @ihl 32-bit words (minimum 5 for a valid header).
 * Returns the folded, already-complemented 16-bit checksum.
 *
 * The asm sums the first four words with an explicit carry chain
 * (add/addc), then loops over any remaining option words (label 1),
 * folds the 32-bit accumulator into 16 bits via the two extru/add
 * steps, and complements it with "subi -1, %0, %0" (~x == -1 - x).
 * %1 (iph) and %2 (ihl) are consumed as scratch, hence the matching
 * in/out constraints; r19-r21 are clobbered as temporaries.
 * NOTE(review): the addib,<= at the top branches to 2f for short
 * headers, returning a partially-folded sum — callers are presumed
 * to pass ihl >= 5, as for any valid IP header.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum;

	__asm__ __volatile__ (
"	ldws,ma		4(%1), %0\n"
"	addib,<=	-4, %2, 2f\n"
"\n"
"	ldws		4(%1), %%r20\n"
"	ldws		8(%1), %%r21\n"
"	add		%0, %%r20, %0\n"
"	ldws,ma		12(%1), %%r19\n"
"	addc		%0, %%r21, %0\n"
"	addc		%0, %%r19, %0\n"
"1:	ldws,ma		4(%1), %%r19\n"
"	addib,<		0, %2, 1b\n"
"	addc		%0, %%r19, %0\n"
"\n"
"	extru		%0, 31, 16, %%r20\n"
"	extru		%0, 15, 16, %%r21\n"
"	addc		%%r20, %%r21, %0\n"
"	extru		%0, 15, 16, %%r21\n"
"	add		%0, %%r21, %0\n"
"	subi		-1, %0, %0\n"
"2:\n"
	: "=r" (sum), "=r" (iph), "=r" (ihl)
	: "1" (iph), "2" (ihl)
	: "r19", "r20", "r21", "memory");

	return (__force __sum16)sum;
}
72 | |||
73 | /* | ||
74 | * Fold a partial checksum | ||
75 | */ | ||
76 | static inline __sum16 csum_fold(__wsum csum) | ||
77 | { | ||
78 | u32 sum = (__force u32)csum; | ||
79 | /* add the swapped two 16-bit halves of sum, | ||
80 | a possible carry from adding the two 16-bit halves, | ||
81 | will carry from the lower half into the upper half, | ||
82 | giving us the correct sum in the upper half. */ | ||
83 | sum += (sum << 16) + (sum >> 16); | ||
84 | return (__force __sum16)(~sum >> 16); | ||
85 | } | ||
86 | |||
/*
 * Accumulate the IPv4 pseudo-header into @sum without folding.
 *
 * Adds @saddr, @daddr and the 16-bit proto+len field into the running
 * 32-bit checksum with an explicit end-around carry chain: each addc
 * picks up the carry from the previous add, and the final
 * "addc %%r0, %0, %0" folds the last carry back into the result
 * (r0 is the PA-RISC hardwired zero register).
 * The returned value is still a 32-bit partial sum suitable for
 * csum_fold()/csum_tcpudp_magic().
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					unsigned short len,
					unsigned short proto,
					__wsum sum)
{
	__asm__(
"	add  %1, %0, %0\n"
"	addc %2, %0, %0\n"
"	addc %3, %0, %0\n"
"	addc %%r0, %0, %0\n"
	: "=r" (sum)
	: "r" (daddr), "r"(saddr), "r"(proto+len), "0"(sum));
	return sum;
}
101 | |||
102 | /* | ||
103 | * computes the checksum of the TCP/UDP pseudo-header | ||
104 | * returns a 16-bit checksum, already complemented | ||
105 | */ | ||
106 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, | ||
107 | unsigned short len, | ||
108 | unsigned short proto, | ||
109 | __wsum sum) | ||
110 | { | ||
111 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * this routine is used for miscellaneous IP-like checksums, mainly | ||
116 | * in icmp.c | ||
117 | */ | ||
118 | static inline __sum16 ip_compute_csum(const void *buf, int len) | ||
119 | { | ||
120 | return csum_fold (csum_partial(buf, len, 0)); | ||
121 | } | ||
122 | |||
123 | |||
#define _HAVE_ARCH_IPV6_CSUM
/*
 * Compute the complemented checksum of the IPv6 pseudo-header:
 * both 128-bit addresses plus the 16-bit proto+len, folded into the
 * running @sum. Two asm variants: 64-bit doubleword loads (ldd) with
 * a carry chain and an explicit fold of the upper half on PA 2.0,
 * and 32-bit word loads (ldw) interleaved with addc on PA 1.x.
 * %1/%2/%3 (saddr/daddr/len) are consumed as scratch; r19-r22 are
 * clobbered as load temporaries.
 */
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, unsigned short proto,
					  __wsum sum)
{
	__asm__ __volatile__ (

#if BITS_PER_LONG > 32

	/*
	** We can execute two loads and two adds per cycle on PA 8000.
	** But add insn's get serialized waiting for the carry bit.
	** Try to keep 4 registers with "live" values ahead of the ALU.
	*/

"	ldd,ma		8(%1), %%r19\n"	/* get 1st saddr word */
"	ldd,ma		8(%2), %%r20\n"	/* get 1st daddr word */
"	add		%8, %3, %3\n"	/* add 16-bit proto + len */
"	add		%%r19, %0, %0\n"
"	ldd,ma		8(%1), %%r21\n"	/* 2nd saddr */
"	ldd,ma		8(%2), %%r22\n"	/* 2nd daddr */
"	add,dc		%%r20, %0, %0\n"
"	add,dc		%%r21, %0, %0\n"
"	add,dc		%%r22, %0, %0\n"
"	add,dc		%3, %0, %0\n"  /* fold in proto+len | carry bit */
"	extrd,u		%0, 31, 32, %%r19\n"	/* copy upper half down */
"	depdi		0, 31, 32, %0\n"	/* clear upper half */
"	add		%%r19, %0, %0\n"	/* fold into 32-bits */
"	addc		0, %0, %0\n"		/* add carry */

#else

	/*
	** For PA 1.x, the insn order doesn't matter as much.
	** Insn stream is serialized on the carry bit here too.
	** result from the previous operation (eg r0 + x)
	*/

"	ldw,ma		4(%1), %%r19\n"	/* get 1st saddr word */
"	ldw,ma		4(%2), %%r20\n"	/* get 1st daddr word */
"	add		%8, %3, %3\n"	/* add 16-bit proto + len */
"	add		%%r19, %0, %0\n"
"	ldw,ma		4(%1), %%r21\n"	/* 2nd saddr */
"	addc		%%r20, %0, %0\n"
"	ldw,ma		4(%2), %%r22\n"	/* 2nd daddr */
"	addc		%%r21, %0, %0\n"
"	ldw,ma		4(%1), %%r19\n"	/* 3rd saddr */
"	addc		%%r22, %0, %0\n"
"	ldw,ma		4(%2), %%r20\n"	/* 3rd daddr */
"	addc		%%r19, %0, %0\n"
"	ldw,ma		4(%1), %%r21\n"	/* 4th saddr */
"	addc		%%r20, %0, %0\n"
"	ldw,ma		4(%2), %%r22\n"	/* 4th daddr */
"	addc		%%r21, %0, %0\n"
"	addc		%%r22, %0, %0\n"
"	addc		%3, %0, %0\n"	/* fold in proto+len, catch carry */

#endif
	: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
	: "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
	: "r19", "r20", "r21", "r22");
	return csum_fold(sum);
}
188 | |||
189 | /* | ||
190 | * Copy and checksum to user | ||
191 | */ | ||
192 | #define HAVE_CSUM_COPY_USER | ||
193 | static __inline__ __wsum csum_and_copy_to_user(const void *src, | ||
194 | void __user *dst, | ||
195 | int len, __wsum sum, | ||
196 | int *err_ptr) | ||
197 | { | ||
198 | /* code stolen from include/asm-mips64 */ | ||
199 | sum = csum_partial(src, len, sum); | ||
200 | |||
201 | if (copy_to_user(dst, src, len)) { | ||
202 | *err_ptr = -EFAULT; | ||
203 | return (__force __wsum)-1; | ||
204 | } | ||
205 | |||
206 | return sum; | ||
207 | } | ||
208 | |||
209 | #endif | ||
210 | |||