author     Al Viro <viro@zeniv.linux.org.uk>        2006-11-15 00:23:40 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2006-12-03 00:23:24 -0500
commit     b8e4e01dd5576a14602a49829f9f9d997bb60b6b
tree       9f3a297db0bf96f55997f319ae56addfcf406288 /include
parent     d5c63936410fbcabd92df1ac12f3f63ca23c7a86
[NET]: XTENSA checksum annotations and cleanups.

* sanitize prototypes, annotate
* kill csum_partial_copy_fromuser
* kill csum_partial_copy
* kill useless shifts
* usual ntohs->shift

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
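For readers new to these annotations: __wsum and __sum16 are sparse "bitwise" types from include/linux/types.h, so mixing them with plain integers without a __force cast draws a checker warning; that is what the prototype changes below are about. The snippet here is a minimal, simplified sketch of that machinery (it is not the real kernel headers), and example_fold() is a hypothetical helper shown only to illustrate where the __force casts land.

/*
 * Simplified sketch of the sparse "bitwise" machinery behind __wsum/__sum16.
 * Not the actual include/linux/types.h; example_fold() is hypothetical.
 */
#ifdef __CHECKER__
# define __bitwise	__attribute__((bitwise))
# define __force	__attribute__((force))
#else
# define __bitwise
# define __force
#endif

typedef unsigned int	__bitwise __wsum;	/* 32-bit running checksum */
typedef unsigned short	__bitwise __sum16;	/* 16-bit folded checksum  */

/* Annotated prototype, as this patch introduces it for xtensa. */
__wsum csum_partial(const void *buff, int len, __wsum sum);

/* Generic fold, written out to show where __force casts are required. */
static inline __sum16 example_fold(__wsum csum)
{
	unsigned int sum = (__force unsigned int)csum;	/* leave bitwise space */

	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (__force __sum16)~sum;			/* re-enter bitwise space */
}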
Diffstat (limited to 'include')
-rw-r--r--  include/asm-xtensa/checksum.h  |  64
1 file changed, 26 insertions(+), 38 deletions(-)
diff --git a/include/asm-xtensa/checksum.h b/include/asm-xtensa/checksum.h
index 03114f8d1e18..5435aff9a4b7 100644
--- a/include/asm-xtensa/checksum.h
+++ b/include/asm-xtensa/checksum.h
@@ -26,7 +26,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -36,7 +36,7 @@ asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsign
  * better 64-bit) boundary
  */
 
-asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, int len, int sum,
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum,
					    int *src_err_ptr, int *dst_err_ptr);
 
 /*
@@ -46,34 +46,25 @@ asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, i
  * If you use these functions directly please don't forget the access_ok().
  */
 static inline
-unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
-					int len, int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+					int len, __wsum sum)
 {
-	return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
+	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 
 static inline
-unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
-					int len, int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+					int len, __wsum sum, int *err_ptr)
 {
-	return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL);
+	return csum_partial_copy_generic((__force const void *)src, dst,
+					len, sum, err_ptr, NULL);
 }
 
 /*
- * These are the old (and unsafe) way of doing checksums, a warning message will be
- * printed if they are used and an exeption occurs.
- *
- * these functions should go away after some time.
- */
-
-#define csum_partial_copy_fromuser csum_partial_copy
-unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
-
-/*
  * Fold a partial checksum
  */
 
-static __inline__ unsigned int csum_fold(unsigned int sum)
+static __inline__ __sum16 csum_fold(__wsum sum)
 {
 	unsigned int __dummy;
 	__asm__("extui %1, %0, 16, 16\n\t"
@@ -87,14 +78,14 @@ static __inline__ unsigned int csum_fold(unsigned int sum)
 		"extui %0, %0, 0, 16\n\t"
 		: "=r" (sum), "=&r" (__dummy)
 		: "0" (sum));
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
  */
-static __inline__ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum, tmp, endaddr;
 
@@ -127,17 +118,16 @@ static __inline__ unsigned short ip_fast_csum(unsigned char * iph, unsigned int
 	return csum_fold(sum);
 }
 
-static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
-						   unsigned long daddr,
+static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 
 #ifdef __XTENSA_EL__
-	unsigned long len_proto = (ntohs(len)<<16)+proto*256;
+	unsigned long len_proto = (len + proto) << 8;
 #elif defined(__XTENSA_EB__)
-	unsigned long len_proto = (proto<<16)+len;
+	unsigned long len_proto = len + proto;
 #else
 # error processor byte order undefined!
 #endif
@@ -162,11 +152,10 @@ static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static __inline__ unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						       unsigned long daddr,
+static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						       unsigned short len,
 						       unsigned short proto,
-						       unsigned int sum)
+						       __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -176,17 +165,16 @@ static __inline__ unsigned short int csum_tcpudp_magic(unsigned long saddr,
  * in icmp.c
  */
 
-static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
+static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold (csum_partial(buff, len, 0));
 }
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						      struct in6_addr *daddr,
-						      __u32 len,
-						      unsigned short proto,
-						      unsigned int sum)
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+					   const struct in6_addr *daddr,
+					   __u32 len, unsigned short proto,
+					   __wsum sum)
 {
 	unsigned int __dummy;
 	__asm__("l32i %1, %2, 0\n\t"
@@ -248,8 +236,8 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
  * Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static __inline__ unsigned int csum_and_copy_to_user (const char *src, char *dst,
-						       int len, int sum, int *err_ptr)
+static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst,
+					       int len, __wsum sum, int *err_ptr)
 {
 	if (access_ok(VERIFY_WRITE, dst, len))
 		return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
@@ -257,6 +245,6 @@ static __inline__ unsigned int csum_and_copy_to_user (const char *src, char *dst
 	if (len)
 		*err_ptr = -EFAULT;
 
-	return -1; /* invalid checksum */
+	return (__force __wsum)-1; /* invalid checksum */
 }
 #endif
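Usage note (not part of the patch): a hedged sketch of how callers typically consume the annotated helpers above. The first function mirrors the IPv4 stack's ip_send_check() pattern for recomputing an IP header checksum; example_udp_check() is a hypothetical name and simply combines csum_partial() with csum_tcpudp_magic() for a UDP datagram, assuming the usual struct iphdr/udphdr layouts.

/*
 * Hypothetical callers, sketched only to show the annotated types in use.
 * Kernel-context code; the function names here are made up.
 */
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <net/checksum.h>

/* Recompute an IPv4 header checksum (the ip_send_check() pattern). */
static void example_ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum(iph, iph->ihl);
}

/* UDP checksum: sum the datagram, then fold in the pseudo-header. */
static __sum16 example_udp_check(__be32 saddr, __be32 daddr,
				 const struct udphdr *uh, unsigned short len)
{
	__wsum csum = csum_partial(uh, len, 0);

	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, csum);
}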