diff options
-rw-r--r--  arch/m68knommu/kernel/m68k_ksyms.c |  2
-rw-r--r--  arch/m68knommu/lib/checksum.c      | 28
-rw-r--r--  include/asm-m68knommu/checksum.h   | 46
3 files changed, 37 insertions, 39 deletions
diff --git a/arch/m68knommu/kernel/m68k_ksyms.c b/arch/m68knommu/kernel/m68k_ksyms.c
index 1e62150f3588..25327c9eadd7 100644
--- a/arch/m68knommu/kernel/m68k_ksyms.c
+++ b/arch/m68knommu/kernel/m68k_ksyms.c
@@ -38,7 +38,7 @@ EXPORT_SYMBOL(ip_fast_csum);
38 | EXPORT_SYMBOL(kernel_thread); | 38 | EXPORT_SYMBOL(kernel_thread); |
39 | 39 | ||
40 | /* Networking helper routines. */ | 40 | /* Networking helper routines. */ |
41 | EXPORT_SYMBOL(csum_partial_copy); | 41 | EXPORT_SYMBOL(csum_partial_copy_nocheck); |
42 | 42 | ||
43 | /* The following are special because they're not called | 43 | /* The following are special because they're not called |
44 | explicitly (the C compiler generates them). Fortunately, | 44 | explicitly (the C compiler generates them). Fortunately, |
diff --git a/arch/m68knommu/lib/checksum.c b/arch/m68knommu/lib/checksum.c
index 7bec6fdee34b..269d83bfbbe1 100644
--- a/arch/m68knommu/lib/checksum.c
+++ b/arch/m68knommu/lib/checksum.c
@@ -96,9 +96,9 @@ out:
96 | * This is a version of ip_compute_csum() optimized for IP headers, | 96 | * This is a version of ip_compute_csum() optimized for IP headers, |
97 | * which always checksum on 4 octet boundaries. | 97 | * which always checksum on 4 octet boundaries. |
98 | */ | 98 | */ |
99 | unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl) | 99 | __sum16 ip_fast_csum(const void *iph, unsigned int ihl) |
100 | { | 100 | { |
101 | return ~do_csum(iph,ihl*4); | 101 | return (__force __sum16)~do_csum(iph,ihl*4); |
102 | } | 102 | } |
103 | 103 | ||
104 | /* | 104 | /* |
@@ -113,15 +113,15 @@ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
113 | * | 113 | * |
114 | * it's best to have buff aligned on a 32-bit boundary | 114 | * it's best to have buff aligned on a 32-bit boundary |
115 | */ | 115 | */ |
116 | unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum) | 116 | __wsum csum_partial(const void *buff, int len, __wsum sum) |
117 | { | 117 | { |
118 | unsigned int result = do_csum(buff, len); | 118 | unsigned int result = do_csum(buff, len); |
119 | 119 | ||
120 | /* add in old sum, and carry.. */ | 120 | /* add in old sum, and carry.. */ |
121 | result += sum; | 121 | result += (__force u32)sum; |
122 | if (sum > result) | 122 | if ((__force u32)sum > result) |
123 | result += 1; | 123 | result += 1; |
124 | return result; | 124 | return (__force __wsum)result; |
125 | } | 125 | } |
126 | 126 | ||
127 | EXPORT_SYMBOL(csum_partial); | 127 | EXPORT_SYMBOL(csum_partial); |
@@ -130,21 +130,21 @@ EXPORT_SYMBOL(csum_partial);
130 | * this routine is used for miscellaneous IP-like checksums, mainly | 130 | * this routine is used for miscellaneous IP-like checksums, mainly |
131 | * in icmp.c | 131 | * in icmp.c |
132 | */ | 132 | */ |
133 | unsigned short ip_compute_csum(const unsigned char * buff, int len) | 133 | __sum16 ip_compute_csum(const void *buff, int len) |
134 | { | 134 | { |
135 | return ~do_csum(buff,len); | 135 | return (__force __sum16)~do_csum(buff,len); |
136 | } | 136 | } |
137 | 137 | ||
138 | /* | 138 | /* |
139 | * copy from fs while checksumming, otherwise like csum_partial | 139 | * copy from fs while checksumming, otherwise like csum_partial |
140 | */ | 140 | */ |
141 | 141 | ||
142 | unsigned int | 142 | __wsum |
143 | csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst, | 143 | csum_partial_copy_from_user(const void __user *src, void *dst, |
144 | int len, int sum, int *csum_err) | 144 | int len, __wsum sum, int *csum_err) |
145 | { | 145 | { |
146 | if (csum_err) *csum_err = 0; | 146 | if (csum_err) *csum_err = 0; |
147 | memcpy(dst, src, len); | 147 | memcpy(dst, (__force const void *)src, len); |
148 | return csum_partial(dst, len, sum); | 148 | return csum_partial(dst, len, sum); |
149 | } | 149 | } |
150 | 150 | ||
@@ -152,8 +152,8 @@ csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
152 | * copy from ds while checksumming, otherwise like csum_partial | 152 | * copy from ds while checksumming, otherwise like csum_partial |
153 | */ | 153 | */ |
154 | 154 | ||
155 | unsigned int | 155 | __wsum |
156 | csum_partial_copy(const unsigned char *src, unsigned char *dst, int len, int sum) | 156 | csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) |
157 | { | 157 | { |
158 | memcpy(dst, src, len); | 158 | memcpy(dst, src, len); |
159 | return csum_partial(dst, len, sum); | 159 | return csum_partial(dst, len, sum); |
diff --git a/include/asm-m68knommu/checksum.h b/include/asm-m68knommu/checksum.h
index 294ec7583ac9..81883482ffb1 100644
--- a/include/asm-m68knommu/checksum.h
+++ b/include/asm-m68knommu/checksum.h
@@ -15,7 +15,7 @@
15 | * | 15 | * |
16 | * it's best to have buff aligned on a 32-bit boundary | 16 | * it's best to have buff aligned on a 32-bit boundary |
17 | */ | 17 | */ |
18 | unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum); | 18 | __wsum csum_partial(const void *buff, int len, __wsum sum); |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * the same as csum_partial, but copies from src while it | 21 | * the same as csum_partial, but copies from src while it |
@@ -25,8 +25,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
25 | * better 64-bit) boundary | 25 | * better 64-bit) boundary |
26 | */ | 26 | */ |
27 | 27 | ||
28 | unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst, | 28 | __wsum csum_partial_copy_nocheck(const void *src, void *dst, |
29 | int len, int sum); | 29 | int len, __wsum sum); |
30 | 30 | ||
31 | 31 | ||
32 | /* | 32 | /* |
@@ -36,33 +36,31 @@ unsigned int csum_partial_copy(const unsigned char *src, unsigned char *dst,
36 | * better 64-bit) boundary | 36 | * better 64-bit) boundary |
37 | */ | 37 | */ |
38 | 38 | ||
39 | extern unsigned int csum_partial_copy_from_user(const unsigned char *src, | 39 | extern __wsum csum_partial_copy_from_user(const void __user *src, |
40 | unsigned char *dst, int len, int sum, int *csum_err); | 40 | void *dst, int len, __wsum sum, int *csum_err); |
41 | 41 | ||
42 | #define csum_partial_copy_nocheck(src, dst, len, sum) \ | 42 | __sum16 ip_fast_csum(const void *iph, unsigned int ihl); |
43 | csum_partial_copy((src), (dst), (len), (sum)) | ||
44 | |||
45 | unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl); | ||
46 | 43 | ||
47 | /* | 44 | /* |
48 | * Fold a partial checksum | 45 | * Fold a partial checksum |
49 | */ | 46 | */ |
50 | 47 | ||
51 | static inline unsigned int csum_fold(unsigned int sum) | 48 | static inline __sum16 csum_fold(__wsum sum) |
52 | { | 49 | { |
50 | unsigned int tmp = (__force u32)sum; | ||
53 | #ifdef CONFIG_COLDFIRE | 51 | #ifdef CONFIG_COLDFIRE |
54 | sum = (sum & 0xffff) + (sum >> 16); | 52 | tmp = (tmp & 0xffff) + (tmp >> 16); |
55 | sum = (sum & 0xffff) + (sum >> 16); | 53 | tmp = (tmp & 0xffff) + (tmp >> 16); |
54 | return (__force __sum16)~tmp; | ||
56 | #else | 55 | #else |
57 | unsigned int tmp = sum; | ||
58 | __asm__("swap %1\n\t" | 56 | __asm__("swap %1\n\t" |
59 | "addw %1, %0\n\t" | 57 | "addw %1, %0\n\t" |
60 | "clrw %1\n\t" | 58 | "clrw %1\n\t" |
61 | "addxw %1, %0" | 59 | "addxw %1, %0" |
62 | : "=&d" (sum), "=&d" (tmp) | 60 | : "=&d" (sum), "=&d" (tmp) |
63 | : "0" (sum), "1" (sum)); | 61 | : "0" (sum), "1" (sum)); |
62 | return (__force __sum16)~sum; | ||
64 | #endif | 63 | #endif |
65 | return ~sum; | ||
66 | } | 64 | } |
67 | 65 | ||
68 | 66 | ||
@@ -71,9 +69,9 @@ static inline unsigned int csum_fold(unsigned int sum)
71 | * returns a 16-bit checksum, already complemented | 69 | * returns a 16-bit checksum, already complemented |
72 | */ | 70 | */ |
73 | 71 | ||
74 | static inline unsigned int | 72 | static inline __wsum |
75 | csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len, | 73 | csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, |
76 | unsigned short proto, unsigned int sum) | 74 | unsigned short proto, __wsum sum) |
77 | { | 75 | { |
78 | __asm__ ("addl %1,%0\n\t" | 76 | __asm__ ("addl %1,%0\n\t" |
79 | "addxl %4,%0\n\t" | 77 | "addxl %4,%0\n\t" |
@@ -86,9 +84,9 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
86 | return sum; | 84 | return sum; |
87 | } | 85 | } |
88 | 86 | ||
89 | static inline unsigned short int | 87 | static inline __sum16 |
90 | csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len, | 88 | csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, |
91 | unsigned short proto, unsigned int sum) | 89 | unsigned short proto, __wsum sum) |
92 | { | 90 | { |
93 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); | 91 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); |
94 | } | 92 | } |
@@ -98,12 +96,12 @@ csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
98 | * in icmp.c | 96 | * in icmp.c |
99 | */ | 97 | */ |
100 | 98 | ||
101 | extern unsigned short ip_compute_csum(const unsigned char * buff, int len); | 99 | extern __sum16 ip_compute_csum(const void *buff, int len); |
102 | 100 | ||
103 | #define _HAVE_ARCH_IPV6_CSUM | 101 | #define _HAVE_ARCH_IPV6_CSUM |
104 | static __inline__ unsigned short int | 102 | static __inline__ __sum16 |
105 | csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr, | 103 | csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr, |
106 | __u32 len, unsigned short proto, unsigned int sum) | 104 | __u32 len, unsigned short proto, __wsum sum) |
107 | { | 105 | { |
108 | register unsigned long tmp; | 106 | register unsigned long tmp; |
109 | __asm__("addl %2@,%0\n\t" | 107 | __asm__("addl %2@,%0\n\t" |