author	Al Viro <viro@zeniv.linux.org.uk>	2006-11-15 00:18:39 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-12-03 00:23:10 -0500
commit	7814e4b6d6ce59071887600a8659641ba3d30a43 (patch)
tree	494eb459dc88f208806f6e3bdbbc212d6157ef3d /include/asm-parisc
parent	8e3d8433d8c22ca6c42cba4a67d300c39aae7822 (diff)
[NET]: PARISC checksum annotations and cleanups.
* sanitized prototypes, annotated
* kill shift-by-16 in checksum calculation

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/asm-parisc')
-rw-r--r--	include/asm-parisc/checksum.h	55
1 file changed, 26 insertions(+), 29 deletions(-)
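The annotations in this patch rely on sparse's "bitwise" types. As a point of reference (not part of this diff), the checksum types are defined roughly as follows in include/linux/types.h; under sparse (__CHECKER__) they are distinct types that cannot be mixed with plain integers without a __force cast, while under gcc they compile to ordinary integers at zero runtime cost:

typedef __u32 __bitwise __wsum;   /* 32-bit partial checksum, not yet folded */
typedef __u16 __bitwise __sum16;  /* final 16-bit, already-complemented checksum */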
diff --git a/include/asm-parisc/checksum.h b/include/asm-parisc/checksum.h
index 229cb56fdb7..cc3ec1bd891 100644
--- a/include/asm-parisc/checksum.h
+++ b/include/asm-parisc/checksum.h
@@ -15,7 +15,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern unsigned int csum_partial(const unsigned char *, int, unsigned int);
+extern __wsum csum_partial(const void *, int, __wsum);
 
 /*
  * The same as csum_partial, but copies from src while it checksums.
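csum_partial accumulates a buffer into a 32-bit one's-complement sum that csum_fold later reduces to 16 bits. Purely as an illustration of those semantics (a sketch, not the kernel's arch code, and ignoring the alignment tricks real implementations use), a big-endian reference version looks like:

#include <stdint.h>

static uint32_t csum_partial_sketch(const void *buff, int len, uint32_t wsum)
{
	const uint8_t *p = buff;
	uint64_t sum = wsum;

	while (len > 1) {			/* add 16-bit big-endian words */
		sum += (uint32_t)((p[0] << 8) | p[1]);
		p += 2;
		len -= 2;
	}
	if (len)				/* odd trailing byte, zero-padded */
		sum += (uint32_t)p[0] << 8;
	while (sum >> 32)			/* end-around carry back into 32 bits */
		sum = (sum & 0xffffffffu) + (sum >> 32);
	return (uint32_t)sum;
}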
@@ -23,15 +23,14 @@ extern unsigned int csum_partial(const unsigned char *, int, unsigned int);
  * Here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern unsigned int csum_partial_copy_nocheck(const unsigned char *, unsigned char *,
-					      int, unsigned int);
+extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);
 
 /*
  * this is a new version of the above that records errors it finds in *errp,
  * but continues and zeros the rest of the buffer.
  */
-extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
-					unsigned char *dst, int len, unsigned int sum, int *errp);
+extern __wsum csum_partial_copy_from_user(const void __user *src,
+					void *dst, int len, __wsum sum, int *errp);
 
 /*
  * Optimized for IP headers, which always checksum on 4 octet boundaries.
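The *errp convention means a faulting user read does not silently poison the returned sum: the tail of the buffer is zeroed and the error is reported out of band. A hypothetical caller (illustration only, not from this patch) would use it like this:

static int copy_payload_checksummed(void *dst, const void __user *src,
				    int len, __wsum *csump)
{
	int err = 0;

	*csump = csum_partial_copy_from_user(src, dst, len, 0, &err);
	return err ? -EFAULT : 0;	/* on fault, *csump is not meaningful */
}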
@@ -39,11 +38,10 @@ extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
  * Written by Randolph Chung <tausq@debian.org>, and then mucked with by
  * LaMont Jones <lamont@debian.org>
  */
-static inline unsigned short ip_fast_csum(unsigned char * iph,
-					  unsigned int ihl) {
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
 	unsigned int sum;
 
-
 	__asm__ __volatile__ (
 "	ldws,ma		4(%1), %0\n"
 "	addib,<=	-4, %2, 2f\n"
@@ -69,27 +67,27 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
 	: "1" (iph), "2" (ihl)
 	: "r19", "r20", "r21" );
 
-	return(sum);
+	return (__force __sum16)sum;
 }
 
 /*
  * Fold a partial checksum
  */
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
+	u32 sum = (__force u32)csum;
 	/* add the swapped two 16-bit halves of sum,
 	   a possible carry from adding the two 16-bit halves,
 	   will carry from the lower half into the upper half,
 	   giving us the correct sum in the upper half. */
 	sum += (sum << 16) + (sum >> 16);
-	return (~sum) >> 16;
+	return (__force __sum16)(~sum >> 16);
 }
 
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-					       unsigned long daddr,
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 					       unsigned short len,
 					       unsigned short proto,
-					       unsigned int sum)
+					       __wsum sum)
 {
 	__asm__(
 	"	add %1, %0, %0\n"
@@ -97,19 +95,18 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
 	"	addc %3, %0, %0\n"
 	"	addc %%r0, %0, %0\n"
 	: "=r" (sum)
-	: "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
+	: "r" (daddr), "r"(saddr), "r"(proto+len), "0"(sum));
 	return sum;
 }
 
 /*
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 						   unsigned short len,
 						   unsigned short proto,
-						   unsigned int sum)
+						   __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
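The "kill shift-by-16" item in the changelog is the (proto<<16)+len to proto+len change above. The identity that appears to justify it: checksum arithmetic is one's-complement, i.e. modulo 0xffff, and 0x10000 mod 0xffff = 1, so a 16-bit value added into the high half contributes exactly what it contributes in the low half once the sum is folded; dropping the shift saves an instruction without changing the result. A standalone check (illustration, not kernel code):

#include <assert.h>
#include <stdint.h>

static uint16_t fold16(uint32_t sum)
{
	sum += (sum << 16) + (sum >> 16);
	return (uint16_t)(~sum >> 16);
}

int main(void)
{
	uint32_t base = 0x12345678u;		/* arbitrary partial sum */
	uint32_t proto = 6, len = 1400;		/* e.g. TCP, 1400 bytes */

	assert(fold16(base + ((proto << 16) + len)) ==
	       fold16(base + proto + len));
	return 0;
}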
@@ -118,17 +115,17 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-static inline unsigned short ip_compute_csum(unsigned char * buf, int len) {
+static inline __sum16 ip_compute_csum(const void *buf, int len)
+{
 	return csum_fold (csum_partial(buf, len, 0));
 }
 
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-						     struct in6_addr *daddr,
-						     __u16 len,
-						     unsigned short proto,
-						     unsigned int sum)
+static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+					  const struct in6_addr *daddr,
+					  __u32 len, unsigned short proto,
+					  __wsum sum)
 {
 	__asm__ __volatile__ (
 
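The asm body (mostly elided in this hunk) accumulates the IPv6 pseudo-header of RFC 2460 section 8.1. A portable sketch of that accumulation, assuming 4-byte-aligned addresses (illustration only; csum_ipv6_magic itself also folds and complements the result):

#include <stdint.h>

struct in6_sketch {
	uint32_t s6_addr32[4];			/* 128-bit address as 4 words */
};

static uint32_t csum_ipv6_nofold_sketch(const struct in6_sketch *saddr,
					const struct in6_sketch *daddr,
					uint32_t len, uint16_t proto,
					uint32_t sum)
{
	uint64_t acc = sum;
	int i;

	for (i = 0; i < 4; i++)
		acc += saddr->s6_addr32[i];	/* source address */
	for (i = 0; i < 4; i++)
		acc += daddr->s6_addr32[i];	/* destination address */
	acc += len;				/* upper-layer packet length */
	acc += proto;				/* next-header value */
	while (acc >> 32)			/* end-around carry */
		acc = (acc & 0xffffffffu) + (acc >> 32);
	return (uint32_t)acc;
}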
@@ -193,9 +190,9 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
  * Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static __inline__ unsigned int csum_and_copy_to_user (const unsigned char *src,
-						      unsigned char __user *dst,
-						      int len, int sum,
-						      int *err_ptr)
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+					       void __user *dst,
+					       int len, __wsum sum,
+					       int *err_ptr)
 {
 	/* code stolen from include/asm-mips64 */
@@ -203,7 +200,7 @@ static __inline__ unsigned int csum_and_copy_to_user (const unsigned char *src,
 
 	if (copy_to_user(dst, src, len)) {
 		*err_ptr = -EFAULT;
-		return -1;
+		return (__force __wsum)-1;
 	}
 
 	return sum;
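On the error path the function returns (__force __wsum)-1 purely as a sentinel; callers are expected to consult *err_ptr rather than trust the returned checksum. A hypothetical caller pattern (illustration only, not from this patch):

static int send_checksummed(void __user *udst, const void *src,
			    int len, __wsum *csump)
{
	int err = 0;

	*csump = csum_and_copy_to_user(src, udst, len, 0, &err);
	return err ? -EFAULT : 0;
}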