author     Al Viro <viro@zeniv.linux.org.uk>              2006-11-15 00:18:39 -0500
committer  David S. Miller <davem@sunset.davemloft.net>   2006-12-03 00:23:10 -0500
commit     7814e4b6d6ce59071887600a8659641ba3d30a43
tree       494eb459dc88f208806f6e3bdbbc212d6157ef3d
parent     8e3d8433d8c22ca6c42cba4a67d300c39aae7822
[NET]: PARISC checksum annotations and cleanups.
* sanitized prototypes, annotated
* kill shift-by-16 in checksum calculation
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
 arch/parisc/lib/checksum.c    | 17
 include/asm-parisc/checksum.h | 55
 2 files changed, 36 insertions, 36 deletions
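
For context on the annotations: __wsum is the sparse-annotated 32-bit unfolded (partial) sum, __sum16 is the folded, complemented 16-bit value that actually lands in a protocol header, and __force marks the intentional casts between the two representations. A minimal sketch of how the annotated interface composes (example_checksum() is illustrative, not part of this patch):

	/* illustrative only: composing the annotated checksum helpers */
	#include <linux/types.h>	/* __wsum, __sum16 */
	#include <asm/checksum.h>	/* csum_partial(), csum_fold() */

	static __sum16 example_checksum(const void *buf, int len)
	{
		__wsum partial = csum_partial(buf, len, 0);	/* 32-bit, unfolded */

		return csum_fold(partial);	/* fold + complement only at the end */
	}

Keeping the two types distinct is what lets sparse flag code that mixes a folded __sum16 with an unfolded __wsum without an explicit __force cast.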
diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c
index 8a1e08068e7d..462696d30d3b 100644
--- a/arch/parisc/lib/checksum.c
+++ b/arch/parisc/lib/checksum.c
@@ -101,11 +101,14 @@ out:
 /*
  * computes a partial checksum, e.g. for TCP/UDP fragments
  */
-unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum)
+/*
+ * why bother folding?
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	unsigned int result = do_csum(buff, len);
 	addc(result, sum);
-	return from32to16(result);
+	return (__force __wsum)from32to16(result);
 }
 
 EXPORT_SYMBOL(csum_partial);
@@ -113,8 +116,8 @@ EXPORT_SYMBOL(csum_partial);
 /*
  * copy while checksumming, otherwise like csum_partial
  */
-unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst,
-					int len, unsigned int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				 int len, __wsum sum)
 {
 	/*
 	 * It's 2:30 am and I don't feel like doing it real ...
@@ -131,9 +134,9 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
  * Copy from userspace and compute checksum. If we catch an exception
  * then zero the rest of the buffer.
  */
-unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
-					unsigned char *dst, int len,
-					unsigned int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src,
+					void *dst, int len,
+					__wsum sum, int *err_ptr)
 {
 	int missing;
 
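
The new "why bother folding?" comment refers to the from32to16() call kept in csum_partial(): every caller eventually runs the returned __wsum through csum_fold() anyway, and folding a one's-complement sum early does not change the 16-bit value obtained after the final fold. A small userspace sketch of that property (fold32() and add32_carry() are stand-ins for from32to16() and the addc() used in this file, assuming the usual end-around-carry behaviour):

	/* build with: cc -o folddemo folddemo.c */
	#include <stdint.h>
	#include <stdio.h>

	/* fold a 32-bit sum to 16 bits, as from32to16() does */
	static uint16_t fold32(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)sum;
	}

	/* 32-bit add with end-around carry, roughly what addc() provides */
	static uint32_t add32_carry(uint32_t a, uint32_t b)
	{
		uint64_t s = (uint64_t)a + b;
		return (uint32_t)((s & 0xffffffff) + (s >> 32));
	}

	int main(void)
	{
		uint32_t running = 0x9c3f27ab;	/* arbitrary partial sum */
		uint32_t more    = 0x000184d2;	/* a later contribution  */

		uint16_t early = fold32(add32_carry(fold32(running), more));
		uint16_t late  = fold32(add32_carry(running, more));

		printf("early=%#x late=%#x\n", early, late);	/* both 0x48be */
		return early != late;
	}

So the intermediate narrowing is harmless, but it also buys nothing, which is presumably what the added comment is questioning.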
diff --git a/include/asm-parisc/checksum.h b/include/asm-parisc/checksum.h
index 229cb56fdb7a..cc3ec1bd8919 100644
--- a/include/asm-parisc/checksum.h
+++ b/include/asm-parisc/checksum.h
@@ -15,7 +15,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial(const unsigned char *, int, unsigned int);
+extern __wsum csum_partial(const void *, int, __wsum);
 
 /*
  * The same as csum_partial, but copies from src while it checksums.
@@ -23,15 +23,14 @@ extern unsigned int csum_partial(const unsigned char *, int, unsigned int);
  * Here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern unsigned int csum_partial_copy_nocheck(const unsigned char *, unsigned char *,
-					       int, unsigned int);
+extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);
 
 /*
  * this is a new version of the above that records errors it finds in *errp,
  * but continues and zeros the rest of the buffer.
  */
-extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
-					unsigned char *dst, int len, unsigned int sum, int *errp);
+extern __wsum csum_partial_copy_from_user(const void __user *src,
+					void *dst, int len, __wsum sum, int *errp);
 
 /*
  * Optimized for IP headers, which always checksum on 4 octet boundaries.
@@ -39,11 +38,10 @@ extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
  * Written by Randolph Chung <tausq@debian.org>, and then mucked with by
  * LaMont Jones <lamont@debian.org>
  */
-static inline unsigned short ip_fast_csum(unsigned char * iph,
-					  unsigned int ihl) {
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
 	unsigned int sum;
 
-
 	__asm__ __volatile__ (
 "	ldws,ma		4(%1), %0\n"
 "	addib,<=	-4, %2, 2f\n"
@@ -69,27 +67,27 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
 	: "1" (iph), "2" (ihl)
 	: "r19", "r20", "r21" );
 
-	return(sum);
+	return (__force __sum16)sum;
 }
 
 /*
  * Fold a partial checksum
  */
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
+	u32 sum = (__force u32)csum;
 	/* add the swapped two 16-bit halves of sum,
 	   a possible carry from adding the two 16-bit halves,
 	   will carry from the lower half into the upper half,
 	   giving us the correct sum in the upper half. */
 	sum += (sum << 16) + (sum >> 16);
-	return (~sum) >> 16;
+	return (__force __sum16)(~sum >> 16);
 }
 
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-					       unsigned long daddr,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 					       unsigned short len,
 					       unsigned short proto,
-					       unsigned int sum)
+					       __wsum sum)
 {
 	__asm__(
 "	add %1, %0, %0\n"
@@ -97,19 +95,18 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
 "	addc %3, %0, %0\n"
 "	addc %%r0, %0, %0\n"
 	: "=r" (sum)
-	: "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
+	: "r" (daddr), "r"(saddr), "r"(proto+len), "0"(sum));
 	return sum;
 }
 
 /*
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -118,17 +115,17 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-static inline unsigned short ip_compute_csum(unsigned char * buf, int len) {
+static inline __sum16 ip_compute_csum(const void *buf, int len)
+{
 	return csum_fold (csum_partial(buf, len, 0));
 }
 
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						      struct in6_addr *daddr,
-						      __u16 len,
-						      unsigned short proto,
-						      unsigned int sum)
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+					   const struct in6_addr *daddr,
+					   __u32 len, unsigned short proto,
+					   __wsum sum)
 {
 	__asm__ __volatile__ (
 
@@ -193,9 +190,9 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
  * Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static __inline__ unsigned int csum_and_copy_to_user (const unsigned char *src,
-						       unsigned char __user *dst,
-						       int len, int sum,
-						       int *err_ptr)
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+						void __user *dst,
+						int len, __wsum sum,
+						int *err_ptr)
 {
 	/* code stolen from include/asm-mips64 */
@@ -203,7 +200,7 @@ static __inline__ unsigned int csum_and_copy_to_user (const unsigned char *src,
 
 	if (copy_to_user(dst, src, len)) {
 		*err_ptr = -EFAULT;
-		return -1;
+		return (__force __wsum)-1;
 	}
 
 	return sum;
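
On the "kill shift-by-16" point: csum_tcpudp_nofold() used to feed (proto<<16)+len into the carry-propagating add sequence. Since 2^16 mod 0xffff is 1, a one's-complement (mod-0xffff) sum gives (proto<<16) and proto the same weight once folded, so csum_tcpudp_magic() yields an identical checksum without the shift. A self-contained userspace sketch of the equivalence (csum_fold32() and add32_carry() model csum_fold() and the add/addc chain; all values are arbitrary):

	/* build with: cc -o pseudohdr pseudohdr.c */
	#include <stdint.h>
	#include <stdio.h>

	/* fold to 16 bits and complement, as csum_fold() does */
	static uint16_t csum_fold32(uint32_t sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	/* 32-bit add with end-around carry, as the add/addc sequence does */
	static uint32_t add32_carry(uint32_t a, uint32_t b)
	{
		uint64_t s = (uint64_t)a + b;
		return (uint32_t)((s & 0xffffffff) + (s >> 32));
	}

	int main(void)
	{
		uint32_t saddr = 0x0a000001, daddr = 0x0a000002;	/* 10.0.0.x */
		uint32_t sum   = 0x1234abcd;	/* running payload checksum */
		uint16_t len   = 1480, proto = 6;	/* TCP */

		uint32_t base = add32_carry(add32_carry(sum, saddr), daddr);

		uint16_t shifted = csum_fold32(add32_carry(base, ((uint32_t)proto << 16) + len));
		uint16_t plain   = csum_fold32(add32_carry(base, (uint32_t)proto + len));

		printf("shifted=%#x plain=%#x\n", shifted, plain);	/* both 0x282d */
		return shifted != plain;
	}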