author	Greg Ungerer <gerg@uclinux.org>	2009-07-02 02:11:58 -0400
committer	Greg Ungerer <gerg@uclinux.org>	2009-09-15 19:43:37 -0400
commit	6192c1ea0ac5806592c5c9cc2b2b94b0298df02b (patch)
tree	456489cbe5ffa011c294f84558aa1bf5d512a829
parent	3ddc7e261a990d03fa4e78e37e6623ca45b18d77 (diff)
m68k: merge the mmu and non-mmu versions of checksum.h
The mmu and non-mmu versions of checksum.h are mostly the same, merge them.

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
-rw-r--r--	arch/m68k/include/asm/checksum.h	| 173
-rw-r--r--	arch/m68k/include/asm/checksum_mm.h	| 148
-rw-r--r--	arch/m68k/include/asm/checksum_no.h	| 132
-rw-r--r--	arch/m68knommu/lib/checksum.c	|  11
4 files changed, 171 insertions, 293 deletions
diff --git a/arch/m68k/include/asm/checksum.h b/arch/m68k/include/asm/checksum.h
index 1cf544767453..ec514485c8b6 100644
--- a/arch/m68k/include/asm/checksum.h
+++ b/arch/m68k/include/asm/checksum.h
@@ -1,5 +1,170 @@
-#ifdef __uClinux__
-#include "checksum_no.h"
+#ifndef _M68K_CHECKSUM_H
+#define _M68K_CHECKSUM_H
+
+#include <linux/in6.h>
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+
+extern __wsum csum_partial_copy_from_user(const void __user *src,
+						void *dst,
+						int len, __wsum sum,
+						int *csum_err);
+
+extern __wsum csum_partial_copy_nocheck(const void *src,
+					void *dst, int len,
+					__wsum sum);
+
+
+#ifdef CONFIG_COLDFIRE
+
+/*
+ * The ColdFire cores don't support all the 68k instructions used
+ * in the optimized checksum code below. So it reverts back to using
+ * more standard C coded checksums. The fast checksum code is
+ * significantly larger than the optimized version, so it is not
+ * inlined here.
+ */
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+
+static inline __sum16 csum_fold(__wsum sum)
+{
+	unsigned int tmp = (__force u32)sum;
+
+	tmp = (tmp & 0xffff) + (tmp >> 16);
+	tmp = (tmp & 0xffff) + (tmp >> 16);
+
+	return (__force __sum16)~tmp;
+}
+
 #else
-#include "checksum_mm.h"
-#endif
+
+/*
+ * This is a version of ip_fast_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
+{
+	unsigned int sum = 0;
+	unsigned long tmp;
+
+	__asm__ ("subqw #1,%2\n"
+		 "1:\t"
+		 "movel %1@+,%3\n\t"
+		 "addxl %3,%0\n\t"
+		 "dbra %2,1b\n\t"
+		 "movel %0,%3\n\t"
+		 "swap %3\n\t"
+		 "addxw %3,%0\n\t"
+		 "clrw %3\n\t"
+		 "addxw %3,%0\n\t"
+		 : "=d" (sum), "=&a" (iph), "=&d" (ihl), "=&d" (tmp)
+		 : "0" (sum), "1" (iph), "2" (ihl)
+		 : "memory");
+	return (__force __sum16)~sum;
+}
+
+static inline __sum16 csum_fold(__wsum sum)
+{
+	unsigned int tmp = (__force u32)sum;
+
+	__asm__("swap %1\n\t"
+		"addw %1, %0\n\t"
+		"clrw %1\n\t"
+		"addxw %1, %0"
+		: "=&d" (sum), "=&d" (tmp)
+		: "0" (sum), "1" (tmp));
+
+	return (__force __sum16)~sum;
+}
+
+#endif /* CONFIG_COLDFIRE */
+
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
+{
+	__asm__ ("addl %2,%0\n\t"
+		 "addxl %3,%0\n\t"
+		 "addxl %4,%0\n\t"
+		 "clrl %1\n\t"
+		 "addxl %1,%0"
+		 : "=&d" (sum), "=d" (saddr)
+		 : "g" (daddr), "1" (saddr), "d" (len + proto),
+		   "0" (sum));
+	return sum;
+}
+
+
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+		  unsigned short proto, __wsum sum)
+{
+	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+	return csum_fold (csum_partial(buff, len, 0));
+}
+
+#define _HAVE_ARCH_IPV6_CSUM
+static __inline__ __sum16
+csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
+		__u32 len, unsigned short proto, __wsum sum)
+{
+	register unsigned long tmp;
+	__asm__("addl %2@,%0\n\t"
+		"movel %2@(4),%1\n\t"
+		"addxl %1,%0\n\t"
+		"movel %2@(8),%1\n\t"
+		"addxl %1,%0\n\t"
+		"movel %2@(12),%1\n\t"
+		"addxl %1,%0\n\t"
+		"movel %3@,%1\n\t"
+		"addxl %1,%0\n\t"
+		"movel %3@(4),%1\n\t"
+		"addxl %1,%0\n\t"
+		"movel %3@(8),%1\n\t"
+		"addxl %1,%0\n\t"
+		"movel %3@(12),%1\n\t"
+		"addxl %1,%0\n\t"
+		"addxl %4,%0\n\t"
+		"clrl %1\n\t"
+		"addxl %1,%0"
+		: "=&d" (sum), "=&d" (tmp)
+		: "a" (saddr), "a" (daddr), "d" (len + proto),
+		  "0" (sum));
+
+	return csum_fold(sum);
+}
+
+#endif /* _M68K_CHECKSUM_H */
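
For context, the routines above compose in the usual RFC 1071 fashion: csum_partial() accumulates a 32-bit partial sum over the packet, then csum_tcpudp_magic() adds the pseudo-header and folds the result to 16 bits. A minimal sketch of a caller (illustration only, not part of this commit; example_udp_csum is a made-up name):

	#include <linux/in.h>	/* IPPROTO_UDP */

	/* Hypothetical helper: checksum a UDP header plus payload. */
	static __sum16 example_udp_csum(__be32 saddr, __be32 daddr,
					const void *udp_pkt, unsigned short len)
	{
		/* 32-bit partial sum over the UDP header and payload. */
		__wsum sum = csum_partial(udp_pkt, len, 0);

		/* Add the saddr/daddr/len/proto pseudo-header and fold. */
		return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, sum);
	}
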
diff --git a/arch/m68k/include/asm/checksum_mm.h b/arch/m68k/include/asm/checksum_mm.h
deleted file mode 100644
index 494f9aec37ea..000000000000
--- a/arch/m68k/include/asm/checksum_mm.h
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef _M68K_CHECKSUM_H
-#define _M68K_CHECKSUM_H
-
-#include <linux/in6.h>
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-extern __wsum csum_partial_copy_from_user(const void __user *src,
-						void *dst,
-						int len, __wsum sum,
-						int *csum_err);
-
-extern __wsum csum_partial_copy_nocheck(const void *src,
-					void *dst, int len,
-					__wsum sum);
-
-/*
- * This is a version of ip_compute_csum() optimized for IP headers,
- * which always checksum on 4 octet boundaries.
- *
- */
-static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-{
-	unsigned int sum = 0;
-	unsigned long tmp;
-
-	__asm__ ("subqw #1,%2\n"
-		 "1:\t"
-		 "movel %1@+,%3\n\t"
-		 "addxl %3,%0\n\t"
-		 "dbra %2,1b\n\t"
-		 "movel %0,%3\n\t"
-		 "swap %3\n\t"
-		 "addxw %3,%0\n\t"
-		 "clrw %3\n\t"
-		 "addxw %3,%0\n\t"
-		 : "=d" (sum), "=&a" (iph), "=&d" (ihl), "=&d" (tmp)
-		 : "0" (sum), "1" (iph), "2" (ihl)
-		 : "memory");
-	return (__force __sum16)~sum;
-}
-
-/*
- * Fold a partial checksum
- */
-
-static inline __sum16 csum_fold(__wsum sum)
-{
-	unsigned int tmp = (__force u32)sum;
-	__asm__("swap %1\n\t"
-		"addw %1, %0\n\t"
-		"clrw %1\n\t"
-		"addxw %1, %0"
-		: "=&d" (sum), "=&d" (tmp)
-		: "0" (sum), "1" (tmp));
-	return (__force __sum16)~sum;
-}
-
-
-static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
-		  unsigned short proto, __wsum sum)
-{
-	__asm__ ("addl %2,%0\n\t"
-		 "addxl %3,%0\n\t"
-		 "addxl %4,%0\n\t"
-		 "clrl %1\n\t"
-		 "addxl %1,%0"
-		 : "=&d" (sum), "=d" (saddr)
-		 : "g" (daddr), "1" (saddr), "d" (len + proto),
-		   "0" (sum));
-	return sum;
-}
-
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
-		  unsigned short proto, __wsum sum)
-{
-	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
-}
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-
-static inline __sum16 ip_compute_csum(const void *buff, int len)
-{
-	return csum_fold (csum_partial(buff, len, 0));
-}
-
-#define _HAVE_ARCH_IPV6_CSUM
-static __inline__ __sum16
-csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
-		__u32 len, unsigned short proto, __wsum sum)
-{
-	register unsigned long tmp;
-	__asm__("addl %2@,%0\n\t"
-		"movel %2@(4),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %2@(8),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %2@(12),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %3@,%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %3@(4),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %3@(8),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %3@(12),%1\n\t"
-		"addxl %1,%0\n\t"
-		"addxl %4,%0\n\t"
-		"clrl %1\n\t"
-		"addxl %1,%0"
-		: "=&d" (sum), "=&d" (tmp)
-		: "a" (saddr), "a" (daddr), "d" (len + proto),
-		  "0" (sum));
-
-	return csum_fold(sum);
-}
-
-#endif /* _M68K_CHECKSUM_H */
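
For readers not fluent in 68k assembler, the csum_fold() sequence deleted above (swap/addw/clrw/addxw) behaves roughly like this portable C (an equivalent sketch, not code from the tree):

	#include <stdint.h>

	static inline uint16_t fold_equiv(uint32_t sum)
	{
		uint32_t hi = sum >> 16;	/* "swap": fetch the high word */
		uint32_t lo = sum & 0xffff;	/* low word of the partial sum */

		lo += hi;		/* "addw": add the halves, may carry */
		lo += lo >> 16;		/* "clrw"+"addxw": fold the carry back in */
		return (uint16_t)~lo;	/* one's complement, as the asm returns */
	}
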
diff --git a/arch/m68k/include/asm/checksum_no.h b/arch/m68k/include/asm/checksum_no.h
deleted file mode 100644
index 81883482ffb1..000000000000
--- a/arch/m68k/include/asm/checksum_no.h
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifndef _M68K_CHECKSUM_H
-#define _M68K_CHECKSUM_H
-
-#include <linux/in6.h>
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
-		int len, __wsum sum);
-
-
-/*
- * the same as csum_partial_copy, but copies from user space.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-extern __wsum csum_partial_copy_from_user(const void __user *src,
-				void *dst, int len, __wsum sum, int *csum_err);
-
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-/*
- * Fold a partial checksum
- */
-
-static inline __sum16 csum_fold(__wsum sum)
-{
-	unsigned int tmp = (__force u32)sum;
-#ifdef CONFIG_COLDFIRE
-	tmp = (tmp & 0xffff) + (tmp >> 16);
-	tmp = (tmp & 0xffff) + (tmp >> 16);
-	return (__force __sum16)~tmp;
-#else
-	__asm__("swap %1\n\t"
-		"addw %1, %0\n\t"
-		"clrw %1\n\t"
-		"addxw %1, %0"
-		: "=&d" (sum), "=&d" (tmp)
-		: "0" (sum), "1" (sum));
-	return (__force __sum16)~sum;
-#endif
-}
-
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-
-static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
-		  unsigned short proto, __wsum sum)
-{
-	__asm__ ("addl %1,%0\n\t"
-		 "addxl %4,%0\n\t"
-		 "addxl %5,%0\n\t"
-		 "clrl %1\n\t"
-		 "addxl %1,%0"
-		 : "=&d" (sum), "=&d" (saddr)
-		 : "0" (daddr), "1" (saddr), "d" (len + proto),
-		   "d"(sum));
-	return sum;
-}
-
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
-		  unsigned short proto, __wsum sum)
-{
-	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
-}
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-
-extern __sum16 ip_compute_csum(const void *buff, int len);
-
-#define _HAVE_ARCH_IPV6_CSUM
-static __inline__ __sum16
-csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
-		__u32 len, unsigned short proto, __wsum sum)
-{
-	register unsigned long tmp;
-	__asm__("addl %2@,%0\n\t"
-		"movel %2@(4),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %2@(8),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %2@(12),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %3@,%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %3@(4),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %3@(8),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %3@(12),%1\n\t"
-		"addxl %1,%0\n\t"
-		"addxl %4,%0\n\t"
-		"clrl %1\n\t"
-		"addxl %1,%0"
-		: "=&d" (sum), "=&d" (tmp)
-		: "a" (saddr), "a" (daddr), "d" (len + proto),
-		  "0" (sum));
-
-	return csum_fold(sum);
-}
-
-#endif /* _M68K_CHECKSUM_H */
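
The C fallback kept for ColdFire (in the old checksum_no.h above and in the merged header) folds twice because one pass can leave at most a single carry in the high half, which the second pass absorbs. Worked through on the worst-case input (a sketch, not code from the commit):

	unsigned int tmp = 0xffffffff;		/* worst-case 32-bit partial sum */
	tmp = (tmp & 0xffff) + (tmp >> 16);	/* 0xffff + 0xffff = 0x1fffe */
	tmp = (tmp & 0xffff) + (tmp >> 16);	/* 0xfffe + 0x1 = 0xffff */
	/* (__force __sum16)~tmp is then the final checksum, here 0x0000. */
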
diff --git a/arch/m68knommu/lib/checksum.c b/arch/m68knommu/lib/checksum.c
index 269d83bfbbe1..eccf25d3d73e 100644
--- a/arch/m68knommu/lib/checksum.c
+++ b/arch/m68knommu/lib/checksum.c
@@ -92,6 +92,7 @@ out:
 	return result;
 }
 
+#ifdef CONFIG_COLDFIRE
 /*
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
@@ -100,6 +101,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	return (__force __sum16)~do_csum(iph,ihl*4);
 }
+#endif
 
 /*
  * computes the checksum of a memory block at buff, length len,
@@ -127,15 +129,6 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
 EXPORT_SYMBOL(csum_partial);
 
 /*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-__sum16 ip_compute_csum(const void *buff, int len)
-{
-	return (__force __sum16)~do_csum(buff,len);
-}
-
-/*
  * copy from fs while checksumming, otherwise like csum_partial
  */
 
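
With ip_compute_csum() removed from the library, every m68k flavour now gets the header's inline version (csum_fold of csum_partial), and only ColdFire still builds the C-coded ip_fast_csum() on top of do_csum(). The underlying Internet-checksum algorithm is easy to sanity-check in user space; a standalone sketch (not kernel code, all names made up):

	#include <stdint.h>
	#include <stdio.h>

	/* Reference RFC 1071 checksum: sum 16-bit words, fold, complement. */
	static uint16_t ref_csum(const uint8_t *buf, int len)
	{
		uint32_t sum = 0;
		int i;

		for (i = 0; i + 1 < len; i += 2)
			sum += (uint32_t)(buf[i] << 8 | buf[i + 1]);
		if (len & 1)
			sum += (uint32_t)buf[len - 1] << 8;	/* odd trailing byte */
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries... */
		sum = (sum & 0xffff) + (sum >> 16);	/* ...twice, as above */
		return (uint16_t)~sum;
	}

	int main(void)
	{
		uint8_t hdr[20] = { 0x45, 0x00, 0x00, 0x54 };	/* IPv4-ish header */
		printf("csum=0x%04x\n", ref_csum(hdr, sizeof(hdr)));
		return 0;
	}
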