author     Al Viro <viro@zeniv.linux.org.uk>             2006-11-15 00:17:56 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2006-12-03 00:23:08 -0500
commit     59ed05a7e891d694a43df96ac613f7e8e164eb95 (patch)
tree       90132f4879dcbec3cdd8b04c10fd16b2bce40422 /include/asm-m68knommu
parent     2061acaaae0e165f0104ec9d327a02addbcabd62 (diff)
[NET]: M68Knommu checksum annotations and cleanups.
* sanitize prototypes, add annotations
* collapse csum_partial_copy() into csum_partial_copy_nocheck()
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/asm-m68knommu')
-rw-r--r--  include/asm-m68knommu/checksum.h  46
1 file changed, 22 insertions(+), 24 deletions(-)
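The substance of the annotation pass shows up in the prototypes below: checksum values typed as unsigned int / unsigned short become the sparse-checked __wsum (32-bit partial sum) and __sum16 (16-bit folded result). Under sparse (make C=1) these bitwise types cannot be mixed with plain integers unless a __force cast marks the conversion as deliberate. A minimal stand-alone sketch of the scheme follows; the typedefs stand in for the kernel's own definitions in linux/types.h, and this is an illustration, not the kernel code:

/*
 * Stand-alone sketch of the __wsum/__sum16 annotation scheme this
 * patch applies -- an illustration, not the kernel's own code.
 * Under sparse the bitwise attribute makes each typedef a distinct
 * type that cannot be mixed with plain integers; a regular compiler
 * sees ordinary integers and generates the same code as before.
 */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int u32;
typedef unsigned int   __bitwise __wsum;   /* 32-bit partial checksum */
typedef unsigned short __bitwise __sum16;  /* 16-bit folded checksum  */

/*
 * Same folding as the patched ColdFire branch of csum_fold() below:
 * add the two 16-bit halves twice (the second pass absorbs the carry
 * from the first), then complement.  The __force casts tell sparse
 * the conversions to and from the bitwise types are deliberate.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	u32 tmp = (__force u32)sum;
	tmp = (tmp & 0xffff) + (tmp >> 16);
	tmp = (tmp & 0xffff) + (tmp >> 16);
	return (__force __sum16)~tmp;
}

With these types in place, handing a raw integer to a checksum helper, or returning an unfolded __wsum where a __sum16 is expected, becomes a sparse warning instead of a silently accepted conversion; that is exactly what the prototype changes below buy.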
diff --git a/include/asm-m68knommu/checksum.h b/include/asm-m68knommu/checksum.h
index 294ec7583ac9..81883482ffb1 100644
--- a/include/asm-m68knommu/checksum.h
+++ b/include/asm-m68knommu/checksum.h
@@ -15,7 +15,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -25,8 +25,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  * better 64-bit) boundary
  */
 
-unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst,
-	int len, int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+	int len, __wsum sum);
 
 
 /*
@@ -36,33 +36,31 @@ unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst,
  * better 64-bit) boundary
  */
 
-extern unsigned int csum_partial_copy_from_user(const unsigned char *src,
-	unsigned char *dst, int len, int sum, int *csum_err);
+extern __wsum csum_partial_copy_from_user(const void __user *src,
+	void *dst, int len, __wsum sum, int *csum_err);
 
-#define csum_partial_copy_nocheck(src, dst, len, sum)	\
-	csum_partial_copy((src), (dst), (len), (sum))
-
-unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
  * Fold a partial checksum
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
+	unsigned int tmp = (__force u32)sum;
 #ifdef CONFIG_COLDFIRE
-	sum = (sum & 0xffff) + (sum >> 16);
-	sum = (sum & 0xffff) + (sum >> 16);
+	tmp = (tmp & 0xffff) + (tmp >> 16);
+	tmp = (tmp & 0xffff) + (tmp >> 16);
+	return (__force __sum16)~tmp;
 #else
-	unsigned int tmp = sum;
 	__asm__("swap %1\n\t"
 		"addw %1, %0\n\t"
 		"clrw %1\n\t"
 		"addxw %1, %0"
 		: "=&d" (sum), "=&d" (tmp)
 		: "0" (sum), "1" (sum));
+	return (__force __sum16)~sum;
 #endif
-	return ~sum;
 }
 
 
@@ -71,9 +69,9 @@ static inline unsigned int csum_fold(unsigned int sum)
  * returns a 16-bit checksum, already complemented
  */
 
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	__asm__ ("addl %1,%0\n\t"
 		 "addxl %4,%0\n\t"
@@ -86,9 +84,9 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
 	return sum;
 }
 
-static inline unsigned short int
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
-		  unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -98,12 +96,12 @@ csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
  * in icmp.c
  */
 
-extern unsigned short ip_compute_csum(const unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 #define _HAVE_ARCH_IPV6_CSUM
-static __inline__ unsigned short int
-csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
-		__u32 len, unsigned short proto, unsigned int sum)
+static __inline__ __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+		__u32 len, unsigned short proto, __wsum sum)
 {
 	register unsigned long tmp;
 	__asm__("addl %2@,%0\n\t"
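For context, a hedged sketch of how a caller combines the annotated helpers after this patch. The helper name udp_csum_sketch and its arguments are hypothetical, not part of this header; IPPROTO_UDP (17) comes from the kernel's linux/in.h:

/*
 * Hypothetical caller of the post-patch API: checksum a UDP packet
 * by summing the header+payload, mixing in the pseudo-header, and
 * folding.  Assumes a kernel build environment for asm/checksum.h
 * and linux/in.h; the function name and signature are illustrative.
 */
static __sum16 udp_csum_sketch(__be32 saddr, __be32 daddr,
			       const void *udp_packet, unsigned short len)
{
	/* 32-bit running sum over the UDP header and payload; sparse
	 * accepts a literal 0 as the initial __wsum. */
	__wsum sum = csum_partial(udp_packet, len, 0);

	/* Add the pseudo-header (source/destination address, length,
	 * protocol) and fold to the final complemented 16 bits. */
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, sum);
}

Every intermediate value stays a __wsum until the single csum_fold() inside csum_tcpudp_magic() produces the __sum16 that goes on the wire, which is the invariant the annotations in this patch let sparse enforce.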