Diffstat (limited to 'include/asm-sparc/checksum.h')
-rw-r--r--  include/asm-sparc/checksum.h  103
1 file changed, 46 insertions(+), 57 deletions(-)
diff --git a/include/asm-sparc/checksum.h b/include/asm-sparc/checksum.h
index 286158108974..267e631e9bbc 100644
--- a/include/asm-sparc/checksum.h
+++ b/include/asm-sparc/checksum.h
@@ -30,7 +30,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /* the same as csum_partial, but copies from fs:src while it
  * checksums
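The new __wsum and __sum16 types are the point of this patch: they are "bitwise" types that sparse treats as incompatible with plain integers and with each other, so mixing a 32-bit running sum with a final 16-bit checksum now triggers a static-analysis warning. Roughly (from linux/types.h; the __bitwise attribute exists only when sparse defines __CHECKER__ and compiles away to a plain integer otherwise):

    typedef __u16 __bitwise __sum16; /* final 16-bit checksum, already folded */
    typedef __u32 __bitwise __wsum;  /* 32-bit running sum, carries not yet folded */

A __force cast is the sanctioned escape hatch for deliberate domain crossings, which is why the return statements in the hunks below gain (__force __wsum) and (__force __sum16) casts.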
@@ -41,9 +41,8 @@ extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned i
 
 extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
 
-static inline unsigned int
-csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst, int len,
-			   unsigned int sum)
+static inline __wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
 	register unsigned int ret asm("o0") = (unsigned int)src;
 	register char *d asm("o1") = dst;
@@ -57,42 +56,36 @@ csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst, int len
 	: "o2", "o3", "o4", "o5", "o7",
 	  "g2", "g3", "g4", "g5", "g7",
 	  "memory", "cc");
-	return ret;
+	return (__force __wsum)ret;
 }
 
-static inline unsigned int
-csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst, int len,
-			    unsigned int sum, int *err)
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+			    __wsum sum, int *err)
 {
-	if (!access_ok (VERIFY_READ, src, len)) {
-		*err = -EFAULT;
-		memset (dst, 0, len);
-		return sum;
-	} else {
-		register unsigned long ret asm("o0") = (unsigned long)src;
-		register char *d asm("o1") = dst;
-		register int l asm("g1") = len;
-		register unsigned int s asm("g7") = sum;
+	register unsigned long ret asm("o0") = (unsigned long)src;
+	register char *d asm("o1") = dst;
+	register int l asm("g1") = len;
+	register __wsum s asm("g7") = sum;
 
-		__asm__ __volatile__ (
-		".section __ex_table,#alloc\n\t"
-		".align 4\n\t"
-		".word 1f,2\n\t"
-		".previous\n"
-		"1:\n\t"
-		"call __csum_partial_copy_sparc_generic\n\t"
-		" st %8, [%%sp + 64]\n"
-		: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
-		: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
-		: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
-		  "cc", "memory");
-		return ret;
-	}
+	__asm__ __volatile__ (
+	".section __ex_table,#alloc\n\t"
+	".align 4\n\t"
+	".word 1f,2\n\t"
+	".previous\n"
+	"1:\n\t"
+	"call __csum_partial_copy_sparc_generic\n\t"
+	" st %8, [%%sp + 64]\n"
+	: "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
+	: "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
+	: "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
+	  "cc", "memory");
+	return (__force __wsum)ret;
 }
 
-static inline unsigned int
-csum_partial_copy_to_user(const unsigned char *src, unsigned char __user *dst, int len,
-			  unsigned int sum, int *err)
+static inline __wsum
+csum_partial_copy_to_user(const void *src, void __user *dst, int len,
+			  __wsum sum, int *err)
 {
 	if (!access_ok (VERIFY_WRITE, dst, len)) {
 		*err = -EFAULT;
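Note what disappears from csum_partial_copy_from_user in the hunk above: the inline access_ok() check, its -EFAULT path, and the memset() of the destination. The caller is expected to have validated the user range already; in this era the generic csum_and_copy_from_user() wrapper in include/net/checksum.h does exactly that before dispatching to the arch helper, along these lines (a paraphrase for illustration, not part of this patch):

    static inline
    __wsum csum_and_copy_from_user(const void __user *src, void *dst,
                                   int len, __wsum sum, int *err_ptr)
    {
            if (access_ok(VERIFY_READ, src, len))
                    return csum_partial_copy_from_user(src, dst, len,
                                                       sum, err_ptr);
            if (len)
                    *err_ptr = -EFAULT;
            return sum;
    }

Faults that occur mid-copy are still handled through the __ex_table entry emitted by the asm. csum_partial_copy_to_user, by contrast, keeps its inline access_ok() check, as the next two hunks show.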
@@ -101,7 +94,7 @@ csum_partial_copy_to_user(const unsigned char *src, unsigned char __user *dst, i
 		register unsigned long ret asm("o0") = (unsigned long)src;
 		register char __user *d asm("o1") = dst;
 		register int l asm("g1") = len;
-		register unsigned int s asm("g7") = sum;
+		register __wsum s asm("g7") = sum;
 
 		__asm__ __volatile__ (
 		".section __ex_table,#alloc\n\t"
@@ -116,7 +109,7 @@ csum_partial_copy_to_user(const unsigned char *src, unsigned char __user *dst, i
 		: "o2", "o3", "o4", "o5", "o7",
 		  "g2", "g3", "g4", "g5",
 		  "cc", "memory");
-		return ret;
+		return (__force __wsum)ret;
 	}
 }
 
@@ -126,10 +119,9 @@ csum_partial_copy_to_user(const unsigned char *src, unsigned char __user *dst, i
 /* ihl is always 5 or greater, almost always is 5, and iph is word aligned
  * the majority of the time.
  */
-static inline unsigned short ip_fast_csum(const unsigned char *iph,
-					  unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
-	unsigned short sum;
+	__sum16 sum;
 
 	/* Note: We must read %2 before we touch %0 for the first time,
 	 * because GCC can legitimately use the same register for
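For reference, ip_fast_csum computes the standard RFC 1071 Internet checksum over an IPv4 header of ihl 32-bit words. A portable C sketch of the same computation (an illustration with a hypothetical name, not the kernel's code):

    #include <stdint.h>

    static uint16_t ip_fast_csum_sketch(const void *iph, unsigned int ihl)
    {
            const uint16_t *p = iph;
            uint32_t sum = 0;
            unsigned int i;

            for (i = 0; i < ihl * 2; i++)       /* ihl counts 32-bit words */
                    sum += p[i];
            sum = (sum & 0xffff) + (sum >> 16); /* fold carries back in */
            sum = (sum & 0xffff) + (sum >> 16); /* and the carry of that fold */
            return (uint16_t)~sum;              /* one's complement of the sum */
    }

The sparc asm reaches the same result; only the load ordering and register use differ.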
@@ -164,7 +156,7 @@ static inline unsigned short ip_fast_csum(const unsigned char *iph,
 }
 
 /* Fold a partial checksum without adding pseudo headers. */
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int tmp;
 
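csum_fold takes the 32-bit running sum down to the final 16-bit checksum: add the two 16-bit halves, fold the carry back in, and complement. A portable sketch assuming kernel types (hypothetical name):

    static inline __sum16 csum_fold_sketch(__wsum csum)
    {
            u32 sum = (__force u32)csum;

            sum = (sum & 0xffff) + (sum >> 16); /* high half into low half */
            sum = (sum & 0xffff) + (sum >> 16); /* carry from the add above */
            return (__force __sum16)~sum;
    }

The sparc asm in the next hunk gets there by adding sum<<16 to sum with carry, so the folded value lands in the upper half-word before the complement; hence the (__force u32)sum<<16 operand. This signature change is exactly the domain crossing the annotations police: __wsum in, __sum16 out.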
@@ -173,23 +165,22 @@ static inline unsigned int csum_fold(unsigned int sum)
 			     "addx\t%1, %%g0, %1\n\t"
 			     "xnor\t%%g0, %1, %0"
 			     : "=&r" (sum), "=r" (tmp)
-			     : "0" (sum), "1" (sum<<16)
+			     : "0" (sum), "1" ((__force u32)sum<<16)
 			     : "cc");
-	return sum;
+	return (__force __sum16)sum;
 }
 
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-					       unsigned long daddr,
-					       unsigned int len,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+					unsigned short len,
 					unsigned short proto,
-					       unsigned int sum)
+					__wsum sum)
 {
 	__asm__ __volatile__("addcc\t%1, %0, %0\n\t"
 			     "addxcc\t%2, %0, %0\n\t"
 			     "addxcc\t%3, %0, %0\n\t"
 			     "addx\t%0, %%g0, %0\n\t"
 			     : "=r" (sum), "=r" (saddr)
-			     : "r" (daddr), "r" ((proto<<16)+len), "0" (sum),
+			     : "r" (daddr), "r" (proto + len), "0" (sum),
 			       "1" (saddr)
 			     : "cc");
 	return sum;
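Two changes in csum_tcpudp_nofold deserve a note. The addresses become __be32 (they are network-order IPv4 addresses), and the pseudo-header operand changes from (proto<<16)+len to proto + len. The two forms are interchangeable in one's-complement arithmetic: csum_fold later adds the high and low 16-bit halves together, so it does not matter which half proto occupies; proto + len is simply the form the big-endian generic code uses. A portable big-endian sketch assuming kernel types (hypothetical name, modeled on the generic lib/checksum.c helper):

    static inline __wsum csum_tcpudp_nofold_sketch(__be32 saddr, __be32 daddr,
                                                   unsigned short len,
                                                   unsigned short proto,
                                                   __wsum sum)
    {
            u64 s = (__force u32)sum;

            s += (__force u32)saddr;
            s += (__force u32)daddr;
            s += proto + len;                 /* big-endian byte layout */
            s = (s & 0xffffffff) + (s >> 32); /* fold carries out of bit 31 */
            s = (s & 0xffffffff) + (s >> 32);
            return (__force __wsum)(u32)s;
    }

The sparc asm performs the same additions in 32 bits, with the addxcc/addx chain folding each carry as it goes.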
@@ -199,22 +190,20 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 					unsigned short len,
 					unsigned short proto,
-						   unsigned int sum)
+					__wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
 
-static inline unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						 struct in6_addr *daddr,
-						 __u32 len,
-						 unsigned short proto,
-						 unsigned int sum)
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+				       const struct in6_addr *daddr,
+				       __u32 len, unsigned short proto,
+				       __wsum sum)
 {
 	__asm__ __volatile__ (
 		"addcc %3, %4, %%g4\n\t"
@@ -245,7 +234,7 @@ static inline unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
 }
 
 /* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }
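With the whole header annotated, sparse (run with make C=1 for files being recompiled, or C=2 for all files) can flag checksum-domain mistakes that previously compiled silently. A hypothetical misuse for illustration:

    __wsum partial = csum_partial(buff, len, 0); /* literal 0 is accepted for bitwise types */
    unsigned short s = csum_fold(partial);       /* sparse: incorrect type in initializer */
    __sum16 ok = csum_fold(partial);             /* clean */

Behavior is intended to be unchanged: __bitwise vanishes outside __CHECKER__, every __force cast is a plain assignment to the compiler, and the proto + len rewrite yields the same folded checksum as the old (proto<<16)+len form.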