author     Al Viro <viro@zeniv.linux.org.uk>             2006-11-15 00:15:19 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2006-12-03 00:23:02 -0500
commit     3532010bcf7699f2ce9a2baab58b4b9a5426d97e (patch)
tree       47d1c423fe2345bea93ff3a576363971b9b0a572 /include/asm-cris
parent     9be259aae5264511fe0a8b5e3d6711e0fd1d55df (diff)
[NET]: Cris checksum annotations and cleanups.
* sanitize prototypes and annotate
* kill cast-as-lvalue abuses in csum_partial()
* usual ntohs-equals-shift for checksum purposes
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
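
Note on the "sanitize prototypes and annotate" bullet: the new signatures use the checksum-specific bitwise types — __wsum for a 32-bit partial (unfolded) checksum, __sum16 for a folded, complemented 16-bit checksum, and __be32 for a big-endian 32-bit address. They are ordinary integers at compile time, but they are marked __bitwise so that sparse flags any mixing with plain integers that lacks an explicit __force cast. A minimal sketch of the idea (the real definitions live in include/linux/types.h and include/linux/compiler.h):

/* Sketch only; see linux/types.h and linux/compiler.h for the real
 * definitions.  Under sparse (__CHECKER__) __bitwise makes each typedef
 * a distinct integer type; for a normal gcc build it expands to nothing. */
#ifdef __CHECKER__
# define __bitwise __attribute__((bitwise))
# define __force   __attribute__((force))
#else
# define __bitwise
# define __force
#endif

typedef unsigned int   __bitwise __wsum;   /* 32-bit partial checksum */
typedef unsigned short __bitwise __sum16;  /* folded 16-bit checksum  */
typedef unsigned int   __bitwise __be32;   /* 32-bit big-endian value */

With these in place, passing a host-order unsigned long where a __be32 is expected, or returning an unfolded __wsum as a final checksum, becomes a sparse warning instead of a silent bug.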
Diffstat (limited to 'include/asm-cris')
-rw-r--r--  include/asm-cris/arch-v10/checksum.h  10
-rw-r--r--  include/asm-cris/arch-v32/checksum.h  10
-rw-r--r--  include/asm-cris/checksum.h            34
3 files changed, 25 insertions, 29 deletions
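
Note on the "ntohs-equals-shift" bullet: the folded Internet checksum adds the high and low 16-bit halves of its 32-bit accumulator, so adding x << 16 contributes exactly as much as adding x. On little-endian CRIS, ntohs(len) is the byte-swap of len, which in turn folds to the same value as len << 8, so the inline-asm operand (ntohs(len) << 16) + (proto << 8) in the two arch headers below can be replaced by the cheaper (len + proto) << 8. A small userspace sketch of that equivalence (hypothetical, not part of the patch; it assumes a little-endian host, as CRIS is):

/* Check that ((ntohs(len) << 16) + (proto << 8)) and ((len + proto) << 8)
 * fold to the same 16-bit checksum contribution for every 16-bit len. */
#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint64_t sum)
{
	while (sum >> 16)                       /* add in end-around carry */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	unsigned proto = 6;                     /* e.g. IPPROTO_TCP */
	for (uint32_t len = 0; len <= 0xffff; len++) {
		uint32_t old_op = ((uint32_t)ntohs((uint16_t)len) << 16) + (proto << 8);
		uint32_t new_op = (len + proto) << 8;
		assert(fold(old_op) == fold(new_op));
	}
	printf("old and new pseudo-header operands fold identically\n");
	return 0;
}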
diff --git a/include/asm-cris/arch-v10/checksum.h b/include/asm-cris/arch-v10/checksum.h
index 633f234f336b..b8000c5d7fe1 100644
--- a/include/asm-cris/arch-v10/checksum.h
+++ b/include/asm-cris/arch-v10/checksum.h
@@ -8,11 +8,11 @@
  * to split all of those into 16-bit components, then add.
  */

-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
-		   unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+		   unsigned short proto, __wsum sum)
 {
-	int res;
+	__wsum res;
 	__asm__ ("add.d %2, %0\n\t"
 		 "ax\n\t"
 		 "add.d %3, %0\n\t"
@@ -21,7 +21,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr, unsigned short len,
 		 "ax\n\t"
 		 "addq 0, %0\n"
 		 : "=r" (res)
-		 : "0" (sum), "r" (daddr), "r" (saddr), "r" ((ntohs(len) << 16) + (proto << 8)));
+		 : "0" (sum), "r" (daddr), "r" (saddr), "r" ((len + proto) << 8));

 	return res;
 }
diff --git a/include/asm-cris/arch-v32/checksum.h b/include/asm-cris/arch-v32/checksum.h
index 97ef89efea62..e5dcfce6e0dc 100644
--- a/include/asm-cris/arch-v32/checksum.h
+++ b/include/asm-cris/arch-v32/checksum.h
@@ -9,11 +9,11 @@
  * checksum. Which means it would be necessary to split all those into
  * 16-bit components and then add.
  */
-static inline unsigned int
-csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
-		   unsigned short len, unsigned short proto, unsigned int sum)
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
+		   unsigned short len, unsigned short proto, __wsum sum)
 {
-	int res;
+	__wsum res;

 	__asm__ __volatile__ ("add.d %2, %0\n\t"
 			      "addc %3, %0\n\t"
@@ -21,7 +21,7 @@ csum_tcpudp_nofold(unsigned long saddr, unsigned long daddr,
 			      "addc 0, %0\n\t"
 			      : "=r" (res)
 			      : "0" (sum), "r" (daddr), "r" (saddr), \
-			      "r" ((ntohs(len) << 16) + (proto << 8)));
+			      "r" ((len + proto) << 8));

 	return res;
 }
diff --git a/include/asm-cris/checksum.h b/include/asm-cris/checksum.h
index 26a7719bbb84..180dbf2757b0 100644
--- a/include/asm-cris/checksum.h
+++ b/include/asm-cris/checksum.h
@@ -17,7 +17,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);

 /*
  * the same as csum_partial, but copies from src while it
@@ -27,26 +27,23 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  * better 64-bit) boundary
  */

-unsigned int csum_partial_copy_nocheck(const char *src, char *dst,
-				       int len, unsigned int sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+				 int len, __wsum sum);

 /*
  * Fold a partial checksum into a word
  */

-static inline unsigned int csum_fold(unsigned int sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
-	/* the while loop is unnecessary really, it's always enough with two
-	   iterations */
-
-	while(sum >> 16)
-		sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
-
-	return ~sum;
+	u32 sum = (__force u32)csum;
+	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
+	sum = (sum & 0xffff) + (sum >> 16); /* add in end-around carry */
+	return (__force __sum16)~sum;
 }

-extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
-						int len, unsigned int sum,
-						int *errptr);
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+					  int len, __wsum sum,
+					  int *errptr);

 /*
@@ -55,8 +52,7 @@ extern unsigned int csum_partial_copy_from_user(const char *src, char *dst,
  *
  */

-static inline unsigned short ip_fast_csum(unsigned char * iph,
-					  unsigned int ihl)
+static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 {
 	return csum_fold(csum_partial(iph, ihl * 4, 0));
 }
@@ -66,11 +62,10 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
  * returns a 16-bit checksum, already complemented
  */

-static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
-						   unsigned long daddr,
-						   unsigned short len,
-						   unsigned short proto,
-						   unsigned int sum)
+static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
+					unsigned short len,
+					unsigned short proto,
+					__wsum sum)
 {
 	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
@@ -80,7 +75,8 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
  * in icmp.c
  */

-static inline unsigned short ip_compute_csum(unsigned char * buff, int len) {
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
 	return csum_fold (csum_partial(buff, len, 0));
 }

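
The csum_fold() change in include/asm-cris/checksum.h also drops the old "while (sum >> 16)" loop in favour of two unrolled folding steps, as the removed comment already hinted. Two steps always suffice: adding the two 16-bit halves of a 32-bit sum yields at most 0xffff + 0xffff = 0x1fffe, so one further fold is guaranteed to fit in 16 bits. A plain-C sketch of the same logic, with the kernel-only __wsum/__sum16/__force annotations replaced by ordinary fixed-width types:

#include <stdint.h>

/* Fold a 32-bit partial checksum into the final complemented 16-bit
 * checksum.  Mirrors the new csum_fold() above, but with plain integer
 * types instead of the kernel's bitwise annotations. */
static inline uint16_t fold_checksum(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* at most 0x1fffe after this */
	sum = (sum & 0xffff) + (sum >> 16);	/* now fits in 16 bits */
	return (uint16_t)~sum;
}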