 arch/h8300/kernel/h8300_ksyms.c |  2 +-
 arch/h8300/lib/checksum.c       | 29 +++++++++++++++++------------
 include/asm-h8300/checksum.h    | 31 ++++++++++++++-----------------
 3 files changed, 32 insertions(+), 30 deletions(-)
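
This converts the h8300 checksum code to the sparse-checked checksum types
and renames csum_partial_copy to the cross-arch name csum_partial_copy_nocheck,
dropping the wrapper #define from the header.  For reference, the types
involved are declared in include/linux/types.h roughly as below; __bitwise
makes sparse treat each typedef as a distinct type that cannot be silently
mixed with plain integers, and (__force) casts mark the places where a
conversion is intentional:

	typedef __u32 __bitwise __wsum;		/* 32-bit partial (unfolded) checksum */
	typedef __u16 __bitwise __sum16;	/* 16-bit folded, complemented checksum */
	typedef __u32 __bitwise __be32;		/* 32-bit big-endian quantity */
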
diff --git a/arch/h8300/kernel/h8300_ksyms.c b/arch/h8300/kernel/h8300_ksyms.c
index 9b4be053de3c..d1b15267ac81 100644
--- a/arch/h8300/kernel/h8300_ksyms.c
+++ b/arch/h8300/kernel/h8300_ksyms.c
@@ -39,7 +39,7 @@ EXPORT_SYMBOL(enable_irq);
 EXPORT_SYMBOL(disable_irq);
 
 /* Networking helper routines. */
-EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
 
 /* The following are special because they're not called
    explicitly (the C compiler generates them). Fortunately,
diff --git a/arch/h8300/lib/checksum.c b/arch/h8300/lib/checksum.c
index 5aa688d9242d..bdc5b032acd6 100644
--- a/arch/h8300/lib/checksum.c
+++ b/arch/h8300/lib/checksum.c
@@ -96,9 +96,9 @@ out:
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
  */
-unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
-	return ~do_csum(iph,ihl*4);
+	return (__force __sum16)~do_csum(iph,ihl*4);
 }
 
 /*
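
The ip_fast_csum() contract is unchanged: the caller zeroes the checksum
field first, then stores the complemented result back.  A minimal sketch of
the usual pattern (this is what ip_send_check() does):

	/* (Re)compute an IPv4 header checksum before transmit. */
	iph->check = 0;
	iph->check = ip_fast_csum(iph, iph->ihl);
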
@@ -113,15 +113,19 @@ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
+/*
+ * Egads... That thing apparently assumes that *all* checksums it ever sees will
+ * be folded. Very likely a bug.
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
 	unsigned int result = do_csum(buff, len);
 
 	/* add in old sum, and carry.. */
-	result += sum;
+	result += (__force u32)sum;
 	/* 16+c bits -> 16 bits */
 	result = (result & 0xffff) + (result >> 16);
-	return result;
+	return (__force __wsum)result;
 }
 
 EXPORT_SYMBOL(csum_partial);
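
The "16+c bits" comment and the new warning above it both concern the
"result += sum" line: do_csum() returns a value already folded to 16 bits,
and the caller's 32-bit sum is then added in plain modular arithmetic.  That
only preserves ones'-complement semantics if the incoming sum is itself
already folded; a full 32-bit partial sum can carry out of bit 31, and that
end-around carry is silently dropped.  A worked instance of the failure
mode, with values chosen for illustration:

	unsigned int result = 0xffff;		/* folded output of do_csum() */
	unsigned int sum    = 0xffffffff;	/* unfolded 32-bit partial sum */

	result += sum;	/* 0xffff + 0xffffffff = 0x1_0000_fffe; the u32
			 * wraps to 0x0000fffe, losing the 2^32 carry, and
			 * since 2^32 == 1 (mod 0xffff) the ones'-complement
			 * total ends up short by one */
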
@@ -130,20 +134,21 @@ EXPORT_SYMBOL(csum_partial);
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-unsigned short ip_compute_csum(const unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
 {
-	return ~do_csum(buff,len);
+	return (__force __sum16)~do_csum(buff,len);
 }
 
 /*
  * copy from fs while checksumming, otherwise like csum_partial
  */
 
-unsigned int
-csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *csum_err)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+		__wsum sum, int *csum_err)
 {
 	if (csum_err) *csum_err = 0;
-	memcpy(dst, src, len);
+	memcpy(dst, (__force const void *)src, len);
 	return csum_partial(dst, len, sum);
 }
 
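
The (__force const void *) cast in the memcpy() strips the __user address-
space annotation.  sparse tracks pointer address spaces the same way it
tracks __bitwise types, so mixing a __user pointer with a plain kernel
pointer draws a warning unless the conversion is forced.  On this MMU-less
port a direct copy really is the implementation; the annotation exists to
keep the prototype identical to the MMU architectures.  A hypothetical
fragment just to illustrate the check:

	static void example(void __user *uptr)
	{
		void *kptr;

		kptr = uptr;			/* sparse: warning, address
						 * spaces differ */
		kptr = (__force void *)uptr;	/* intentional: no warning */
	}
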
@@ -151,8 +156,8 @@ csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *c
  * copy from ds while checksumming, otherwise like csum_partial
  */
 
-unsigned int
-csum_partial_copy(const char *src, char *dst, int len, int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
 	memcpy(dst, src, len);
 	return csum_partial(dst, len, sum);
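
Since the function now carries the generic name itself, the compatibility
#define is deleted from the header (below) and callers are unaffected.  A
hedged usage sketch, with the buffer names invented for illustration:

	/* Checksum a block while copying it; 0 is a valid initial sum. */
	__wsum csum = csum_partial_copy_nocheck(src_buf, dst_buf, len, 0);
	__sum16 done = csum_fold(csum);
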
diff --git a/include/asm-h8300/checksum.h b/include/asm-h8300/checksum.h
index 3051931dd301..98724e12508c 100644
--- a/include/asm-h8300/checksum.h
+++ b/include/asm-h8300/checksum.h
@@ -13,7 +13,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -23,7 +23,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  * better 64-bit) boundary
  */
 
-unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
 
 /*
@@ -33,20 +33,17 @@ unsigned int csum_partial_copy(const char *src, char *dst, int len, int sum);
  * better 64-bit) boundary
  */
 
-extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
-						int len, int sum, int *csum_err);
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+						int len, __wsum sum, int *csum_err);
 
-#define csum_partial_copy_nocheck(src, dst, len, sum)	\
-	csum_partial_copy((src), (dst), (len), (sum))
-
-unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 
 /*
  * Fold a partial checksum
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	__asm__("mov.l %0,er0\n\t"
 		"add.w e0,r0\n\t"
@@ -58,7 +55,7 @@ static inline unsigned int csum_fold(unsigned int sum)
 		: "=r"(sum)
 		: "0"(sum)
 		: "er0");
-	return ~sum;
+	return (__force __sum16)~sum;
 }
 
 
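
The inline asm folds and complements in H8/300 registers; functionally it
matches the portable helper in lib/checksum.c, shown here as a sketch since
it may be easier to follow than the asm:

	static inline __sum16 csum_fold_generic(__wsum csum)
	{
		u32 sum = (__force u32)csum;

		sum = (sum & 0xffff) + (sum >> 16);	/* 32 -> 16+c bits */
		sum = (sum & 0xffff) + (sum >> 16);	/* fold the carry back in */
		return (__force __sum16)~sum;		/* complement for the wire */
	}
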
@@ -67,9 +64,9 @@ static inline unsigned int csum_fold(unsigned int sum)
  * returns a 16-bit checksum, already complemented
  */
 
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		unsigned short proto, __wsum sum)
 {
 	__asm__ ("sub.l er0,er0\n\t"
 		"add.l %2,%0\n\t"
@@ -88,9 +85,9 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
 	return sum;
 }
 
-static inline unsigned short int
-csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
-		unsigned short proto, unsigned int sum)
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+		unsigned short proto, __wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
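
csum_tcpudp_magic() is simply csum_fold() applied to the nofold result.
What the asm accumulates, written as portable C for a big-endian machine
like the H8/300 (a sketch under that assumption, using a 64-bit accumulator
so no carry can escape):

	static inline __wsum
	csum_tcpudp_nofold_sketch(__be32 saddr, __be32 daddr, unsigned short len,
				  unsigned short proto, __wsum sum)
	{
		unsigned long long s = (__force u32)sum;

		s += (__force u32)saddr;	/* pseudo-header: source address */
		s += (__force u32)daddr;	/* pseudo-header: destination address */
		s += proto + len;		/* already network order on big-endian */
		s = (s & 0xffffffff) + (s >> 32);	/* 64 -> 32+c bits */
		s = (s & 0xffffffff) + (s >> 32);	/* fold the carry */
		return (__force __wsum)s;
	}
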
@@ -100,6 +97,6 @@ csum_tcpudp_magic(unsigned long saddr, unsigned long daddr, unsigned short len,
  * in icmp.c
  */
 
-extern unsigned short ip_compute_csum(const unsigned char * buff, int len);
+extern __sum16 ip_compute_csum(const void *buff, int len);
 
 #endif /* _H8300_CHECKSUM_H */
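
ip_compute_csum() returns a ready-to-store __sum16, so ICMP-style callers
zero the checksum field and write the result straight back, along the
lines of:

	icmph->checksum = 0;
	icmph->checksum = ip_compute_csum(icmph, len);

With the annotations in place, "make C=1" (run sparse on files being
recompiled) or "make C=2" (run sparse on every file) will flag any code
that passes a folded __sum16 where an unfolded __wsum is expected, or
vice versa.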