diff options: -rw-r--r--

 include/asm-avr32/checksum.h | 40 ++++++++++++++++------------------------
 1 file changed, 18 insertions(+), 22 deletions(-)

diff --git a/include/asm-avr32/checksum.h b/include/asm-avr32/checksum.h
index 41b7af09edc4..af9d53f0f5d2 100644
--- a/include/asm-avr32/checksum.h
+++ b/include/asm-avr32/checksum.h
@@ -20,8 +20,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len,
-			  unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /*
  * the same as csum_partial, but copies from src while it
@@ -30,8 +29,8 @@ unsigned int csum_partial(const unsigned char * buff, int len,
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-unsigned int csum_partial_copy_generic(const char *src, char *dst, int len,
-				       int sum, int *src_err_ptr,
-				       int *dst_err_ptr);
+__wsum csum_partial_copy_generic(const void *src, void *dst, int len,
+				 __wsum sum, int *src_err_ptr,
+				 int *dst_err_ptr);
 
 /*
@@ -42,17 +41,17 @@ unsigned int csum_partial_copy_generic(const char *src, char *dst, int len,
  * verify_area().
  */
 static inline
-unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
-				       int len, int sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				 int len, __wsum sum)
 {
 	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 
 static inline
-unsigned int csum_partial_copy_from_user (const char __user *src, char *dst,
-					  int len, int sum, int *err_ptr)
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+					  int len, __wsum sum, int *err_ptr)
 {
-	return csum_partial_copy_generic((const char __force *)src, dst, len,
+	return csum_partial_copy_generic((const void __force *)src, dst, len,
 					 sum, err_ptr, NULL);
 }
 
@@ -60,8 +59,7 @@ unsigned int csum_partial_copy_from_user (const char __user *src, char *dst,
  * This is a version of ip_compute_csum() optimized for IP headers,
  * which always checksum on 4 octet boundaries.
  */
-static inline unsigned short ip_fast_csum(unsigned char *iph,
-					  unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	unsigned int sum, tmp;
 
@@ -90,14 +88,14 @@ static inline unsigned short ip_fast_csum(unsigned char *iph,
 		: "=r"(sum), "=r"(iph), "=r"(ihl), "=r"(tmp)
 		: "1"(iph), "2"(ihl)
 		: "memory", "cc");
-	return sum;
+	return (__force __sum16)sum;
 }
 
 /*
  * Fold a partial checksum
  */
 
-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int tmp;
 
@@ -109,21 +107,20 @@ static inline unsigned int csum_fold(unsigned int sum)
 	    : "=&r"(sum), "=&r"(tmp)
 	    : "0"(sum));
 
-	return ~sum;
+	return (__force __sum16)~sum;
 }
 
-static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
-					       unsigned long daddr,
-					       unsigned short len,
-					       unsigned short proto,
-					       unsigned int sum)
+static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
 {
 	asm("	add	%0, %1\n"
 	    "	adc	%0, %0, %2\n"
 	    "	adc	%0, %0, %3\n"
 	    "	acr	%0"
 	    : "=r"(sum)
-	    : "r"(daddr), "r"(saddr), "r"(ntohs(len) | (proto << 16)),
+	    : "r"(daddr), "r"(saddr), "r"(len + proto),
 	    "0"(sum)
 	    : "cc");
 
@@ -134,11 +131,10 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
  * computes the checksum of the TCP/UDP pseudo-header
  * returns a 16-bit checksum, already complemented
  */
-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
-						   unsigned short len,
-						   unsigned short proto,
-						   unsigned int sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -148,7 +144,7 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
  * in icmp.c
  */
 
-static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline __sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff, len, 0));
 }