author     Al Viro <viro@zeniv.linux.org.uk>             2006-11-15 00:16:55 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2006-12-03 00:23:06 -0500
commit     85d20dee20f0958df1615e73698f6b0c525812f7
tree       47ddcace761e2c26da0dafc437cef5a2ec852183
parent     322529961e3b3e64fdf1a3e46a45294456c91acf
[NET]: M32R checksum annotations and cleanups.
* sanitize prototypes, annotate
* ntohs -> shift in checksum calculations in l-e case
* kill shift-by-16 in checksum calculations
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
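The last two bullets lean on one property of the ones-complement checksum: csum_fold() only cares about the 32-bit accumulator modulo 0xffff, and since 2^16 ≡ 1 (mod 0xffff) a 16-bit quantity contributes the same whether it sits in the high or the low half of an addend. That is why the little-endian pseudo-header term (ntohs(len)<<16) + proto*256 can become (proto + len) << 8, and why the big-endian (proto<<16) + len collapses to proto + len. A standalone sketch (not kernel code; assumes a little-endian host, and fold16 is a hypothetical stand-in for the reduction csum_fold performs) that brute-forces the little-endian equivalence:

#include <arpa/inet.h>	/* ntohs() */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Reduce a 32-bit ones-complement accumulator to 16 bits, as csum_fold does. */
static uint16_t fold16(uint32_t x)
{
	x = (x >> 16) + (x & 0xffff);	/* fold high half into low half   */
	x += x >> 16;			/* fold the carry that may remain */
	return (uint16_t)x;
}

int main(void)
{
	/* Arbitrary sweep over len/proto values; both terms must fold alike. */
	for (uint32_t len = 0; len <= 0xffff; len += 0x123) {
		for (uint32_t proto = 0; proto <= 0xff; proto++) {
			uint32_t old_term = ((uint32_t)ntohs((uint16_t)len) << 16)
					    + proto * 256;
			uint32_t new_term = (proto + len) << 8;

			assert(fold16(old_term) == fold16(new_term));
		}
	}
	puts("old and new len/proto pseudo-header terms fold identically");
	return 0;
}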
-rw-r--r--  arch/m32r/lib/csum_partial_copy.c  12
-rw-r--r--  include/asm-m32r/checksum.h        52
2 files changed, 29 insertions(+), 35 deletions(-)
diff --git a/arch/m32r/lib/csum_partial_copy.c b/arch/m32r/lib/csum_partial_copy.c
index 3d5f06145854..5596f3df833f 100644
--- a/arch/m32r/lib/csum_partial_copy.c
+++ b/arch/m32r/lib/csum_partial_copy.c
@@ -27,9 +27,8 @@
 /*
  * Copy while checksumming, otherwise like csum_partial
  */
-unsigned int
-csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst,
-			   int len, unsigned int sum)
+__wsum
+csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
 {
 	sum = csum_partial(src, len, sum);
 	memcpy(dst, src, len);
@@ -42,10 +41,9 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
  * Copy from userspace and compute checksum. If we catch an exception
  * then zero the rest of the buffer.
  */
-unsigned int
-csum_partial_copy_from_user (const unsigned char __user *src,
-			     unsigned char *dst,
-			     int len, unsigned int sum, int *err_ptr)
+__wsum
+csum_partial_copy_from_user (const void __user *src, void *dst,
+			     int len, __wsum sum, int *err_ptr)
 {
 	int missing;
 
diff --git a/include/asm-m32r/checksum.h b/include/asm-m32r/checksum.h
index 877ebf46e9ff..a7a7c4f44abe 100644
--- a/include/asm-m32r/checksum.h
+++ b/include/asm-m32r/checksum.h
@@ -31,8 +31,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-asmlinkage unsigned int csum_partial(const unsigned char *buff,
-				     int len, unsigned int sum);
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * The same as csum_partial, but copies from src while it checksums.
@@ -40,24 +39,22 @@ asmlinkage unsigned int csum_partial(const unsigned char *buff,
  * Here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern unsigned int csum_partial_copy_nocheck(const unsigned char *src,
-					      unsigned char *dst,
-					      int len, unsigned int sum);
+extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
+					int len, __wsum sum);
 
 /*
  * This is a new version of the above that records errors it finds in *errp,
  * but continues and zeros thre rest of the buffer.
  */
-extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
-						unsigned char *dst,
-						int len, unsigned int sum,
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+						int len, __wsum sum,
 						int *err_ptr);
 
 /*
  * Fold a partial checksum
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned long tmpreg;
 	__asm__(
@@ -72,16 +69,17 @@ static inline unsigned int csum_fold(unsigned int sum)
 	: "0" (sum)
 	: "cbit"
 	);
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
  */
-static inline unsigned short ip_fast_csum(unsigned char * iph,
-					  unsigned int ihl) {
-	unsigned long sum, tmpreg0, tmpreg1;
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	unsigned long tmpreg0, tmpreg1;
+	__wsum sum;
 
 	__asm__ __volatile__(
 		" ld %0, @%1+ \n"
@@ -115,16 +113,15 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
 	return csum_fold(sum);
 }
 
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-					       unsigned long daddr,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 					       unsigned short len,
 					       unsigned short proto,
-					       unsigned int sum)
+					       __wsum sum)
 {
 #if defined(__LITTLE_ENDIAN)
-	unsigned long len_proto = (ntohs(len)<<16)+proto*256;
+	unsigned long len_proto = (proto + len) << 8;
 #else
-	unsigned long len_proto = (proto<<16)+len;
+	unsigned long len_proto = proto + len;
 #endif
 	unsigned long tmpreg;
 
@@ -147,11 +144,10 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -161,16 +157,16 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
  * in icmp.c
  */
 
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
 	return csum_fold (csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-static inline unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						 struct in6_addr *daddr,
-						 __u16 len,
-						 unsigned short proto,
-						 unsigned int sum)
+static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+				       const struct in6_addr *daddr,
+				       __u32 len, unsigned short proto,
+				       __wsum sum)
 {
 	unsigned long tmpreg0, tmpreg1, tmpreg2, tmpreg3;
 	__asm__(
@@ -197,7 +193,7 @@ static inline unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
 	: "=&r" (sum), "=&r" (tmpreg0), "=&r" (tmpreg1),
 	  "=&r" (tmpreg2), "=&r" (tmpreg3)
 	: "r" (saddr), "r" (daddr),
-	  "r" (htonl((__u32) (len))), "r" (htonl(proto)), "0" (sum)
+	  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
 	: "cbit"
 	);
 
