| author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
| commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
| tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-parisc/checksum.h | |
| tags | Linux-2.6.12-rc2, v2.6.12-rc2 | |
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-parisc/checksum.h')
-rw-r--r--  include/asm-parisc/checksum.h | 213
1 file changed, 213 insertions, 0 deletions
diff --git a/include/asm-parisc/checksum.h b/include/asm-parisc/checksum.h
new file mode 100644
index 000000000000..229cb56fdb7a
--- /dev/null
+++ b/include/asm-parisc/checksum.h
@@ -0,0 +1,213 @@
#ifndef _PARISC_CHECKSUM_H
#define _PARISC_CHECKSUM_H

#include <linux/in6.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
extern unsigned int csum_partial(const unsigned char *, int, unsigned int);
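
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): the 32-bit running sum lets a caller checksum a buffer that
 * arrives in pieces, feeding each partial result back in and folding once
 * at the end.  The fragment names below are hypothetical.
 *
 *      unsigned int sum = csum_partial(frag0, frag0_len, 0);  // even length
 *      sum = csum_partial(frag1, frag1_len, sum);              // last piece, may be odd
 *      unsigned short csum16 = csum_fold(sum);
 */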

/*
 * The same as csum_partial, but copies from src while it checksums.
 *
 * Here even more important to align src and dst on a 32-bit (or even
 * better 64-bit) boundary
 */
extern unsigned int csum_partial_copy_nocheck(const unsigned char *, unsigned char *,
                                              int, unsigned int);

/*
 * this is a new version of the above that records errors it finds in *errp,
 * but continues and zeros the rest of the buffer.
 */
extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
                unsigned char *dst, int len, unsigned int sum, int *errp);
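
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): a fault is reported through *errp (conventionally -EFAULT) and
 * the remainder of dst is zeroed, so callers test the error flag rather
 * than trying to recognise a bad sum.  Variable names are hypothetical.
 *
 *      int err = 0;
 *      sum = csum_partial_copy_from_user(usrc, kdst, len, sum, &err);
 *      if (err)
 *              return -EFAULT;         // partial copy; tail of kdst was zeroed
 */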

/*
 * Optimized for IP headers, which always checksum on 4 octet boundaries.
 *
 * Written by Randolph Chung <tausq@debian.org>, and then mucked with by
 * LaMont Jones <lamont@debian.org>
 */
static inline unsigned short ip_fast_csum(unsigned char * iph,
                                          unsigned int ihl) {
        unsigned int sum;


        __asm__ __volatile__ (
"       ldws,ma 4(%1), %0\n"
"       addib,<= -4, %2, 2f\n"
"\n"
"       ldws 4(%1), %%r20\n"
"       ldws 8(%1), %%r21\n"
"       add %0, %%r20, %0\n"
"       ldws,ma 12(%1), %%r19\n"
"       addc %0, %%r21, %0\n"
"       addc %0, %%r19, %0\n"
"1:     ldws,ma 4(%1), %%r19\n"
"       addib,< 0, %2, 1b\n"
"       addc %0, %%r19, %0\n"
"\n"
"       extru %0, 31, 16, %%r20\n"
"       extru %0, 15, 16, %%r21\n"
"       addc %%r20, %%r21, %0\n"
"       extru %0, 15, 16, %%r21\n"
"       add %0, %%r21, %0\n"
"       subi -1, %0, %0\n"
"2:\n"
        : "=r" (sum), "=r" (iph), "=r" (ihl)
        : "1" (iph), "2" (ihl)
        : "r19", "r20", "r21" );

        return(sum);
}
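
/*
 * Editor's sketch for comparison (not part of the original header): a plain C
 * version of the same computation.  It assumes iph is 32-bit aligned, as the
 * comment above requires, and that ihl counts 32-bit words.  The function
 * name is hypothetical; the assembly above is what this port actually uses.
 */
static inline unsigned short __example_ip_csum_ref(const unsigned char *iph,
                                                   unsigned int ihl)
{
        const unsigned int *word = (const unsigned int *) iph;
        unsigned long long acc = 0;
        unsigned int i;

        for (i = 0; i < ihl; i++)
                acc += word[i];                 /* sum the header words */
        while (acc >> 16)                       /* end-around carry: fold to 16 bits */
                acc = (acc & 0xffff) + (acc >> 16);
        return (unsigned short) ~acc;           /* one's complement result */
}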

/*
 * Fold a partial checksum
 */
static inline unsigned int csum_fold(unsigned int sum)
{
        /* add the swapped two 16-bit halves of sum;
           a possible carry from adding the two 16-bit halves
           will carry from the lower half into the upper half,
           giving us the correct sum in the upper half. */
        sum += (sum << 16) + (sum >> 16);
        return (~sum) >> 16;
}
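
/*
 * Editor's sketch for comparison (not part of the original header): the same
 * fold written with explicit halves, which may be easier to follow than the
 * shift trick above.  The name is hypothetical; both forms yield the same
 * 16-bit one's complement result.
 */
static inline unsigned int __example_csum_fold_ref(unsigned int sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* add high half into low half */
        sum = (sum & 0xffff) + (sum >> 16);     /* fold any carry back in */
        return (~sum) & 0xffff;                 /* complement, keep 16 bits */
}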

static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
                                               unsigned long daddr,
                                               unsigned short len,
                                               unsigned short proto,
                                               unsigned int sum)
{
        __asm__(
        "       add %1, %0, %0\n"
        "       addc %2, %0, %0\n"
        "       addc %3, %0, %0\n"
        "       addc %%r0, %0, %0\n"
        : "=r" (sum)
        : "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
        return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
                                                   unsigned long daddr,
                                                   unsigned short len,
                                                   unsigned short proto,
                                                   unsigned int sum)
{
        return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
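
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header): a UDP-style check combines the sum over the datagram with the
 * pseudo-header in a single call.  IPPROTO_UDP and the variable names are
 * assumed for the example.
 *
 *      unsigned int sum = csum_partial(udp_hdr, udp_len, 0);
 *      unsigned short check = csum_tcpudp_magic(saddr, daddr, udp_len,
 *                                               IPPROTO_UDP, sum);
 */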

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline unsigned short ip_compute_csum(unsigned char * buf, int len) {
        return csum_fold (csum_partial(buf, len, 0));
}


#define _HAVE_ARCH_IPV6_CSUM
static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
                                                     struct in6_addr *daddr,
                                                     __u16 len,
                                                     unsigned short proto,
                                                     unsigned int sum)
{
        __asm__ __volatile__ (

#if BITS_PER_LONG > 32

        /*
        ** We can execute two loads and two adds per cycle on PA 8000.
        ** But add insn's get serialized waiting for the carry bit.
        ** Try to keep 4 registers with "live" values ahead of the ALU.
        */

"       ldd,ma 8(%1), %%r19\n"  /* get 1st saddr word */
"       ldd,ma 8(%2), %%r20\n"  /* get 1st daddr word */
"       add %8, %3, %3\n"       /* add 16-bit proto + len */
"       add %%r19, %0, %0\n"
"       ldd,ma 8(%1), %%r21\n"  /* 2nd saddr */
"       ldd,ma 8(%2), %%r22\n"  /* 2nd daddr */
"       add,dc %%r20, %0, %0\n"
"       add,dc %%r21, %0, %0\n"
"       add,dc %%r22, %0, %0\n"
"       add,dc %3, %0, %0\n"    /* fold in proto+len | carry bit */
"       extrd,u %0, 31, 32, %%r19\n"    /* copy upper half down */
"       depdi 0, 31, 32, %0\n"  /* clear upper half */
"       add %%r19, %0, %0\n"    /* fold into 32-bits */
"       addc 0, %0, %0\n"       /* add carry */

#else

        /*
        ** For PA 1.x, the insn order doesn't matter as much.
        ** Insn stream is serialized on the carry bit here too: each addc
        ** needs the result from the previous operation (eg r0 + x).
        */

"       ldw,ma 4(%1), %%r19\n"  /* get 1st saddr word */
"       ldw,ma 4(%2), %%r20\n"  /* get 1st daddr word */
"       add %8, %3, %3\n"       /* add 16-bit proto + len */
"       add %%r19, %0, %0\n"
"       ldw,ma 4(%1), %%r21\n"  /* 2nd saddr */
"       addc %%r20, %0, %0\n"
"       ldw,ma 4(%2), %%r22\n"  /* 2nd daddr */
"       addc %%r21, %0, %0\n"
"       ldw,ma 4(%1), %%r19\n"  /* 3rd saddr */
"       addc %%r22, %0, %0\n"
"       ldw,ma 4(%2), %%r20\n"  /* 3rd daddr */
"       addc %%r19, %0, %0\n"
"       ldw,ma 4(%1), %%r21\n"  /* 4th saddr */
"       addc %%r20, %0, %0\n"
"       ldw,ma 4(%2), %%r22\n"  /* 4th daddr */
"       addc %%r21, %0, %0\n"
"       addc %%r22, %0, %0\n"
"       addc %3, %0, %0\n"      /* fold in proto+len, catch carry */

#endif
        : "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
        : "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
        : "r19", "r20", "r21", "r22");
        return csum_fold(sum);
}

/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static __inline__ unsigned int csum_and_copy_to_user (const unsigned char *src,
                                                      unsigned char __user *dst,
                                                      int len, int sum,
                                                      int *err_ptr)
{
        /* code stolen from include/asm-mips64 */
        sum = csum_partial(src, len, sum);

        if (copy_to_user(dst, src, len)) {
                *err_ptr = -EFAULT;
                return -1;
        }

        return sum;
}

#endif