author		Al Viro <viro@zeniv.linux.org.uk>	2006-11-15 00:20:08 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-12-03 00:23:14 -0500
commit		a4f89fb7c072b8592b296c2ba216269c0c96db43 (patch)
tree		81ed700573ed0bcf23b99e82ae0b538ac16e62e9 /arch
parent		9d3d41955845939cb41b87affb039db0bae03b65 (diff)
[NET]: X86_64 checksum annotations and cleanups.
* sanitize prototypes, annotate
* usual ntohs->shift
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
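
For context, the __wsum and __sum16 types this patch introduces into the x86_64 checksum code are sparse "bitwise" types, and __force marks a deliberate cast between them and ordinary integers. A minimal sketch of the declarations involved (adapted from memory of the kernel headers, so treat the exact spelling as an assumption):

/*
 * Sketch of the sparse annotations used below.  Under sparse
 * (__CHECKER__), __bitwise makes each typedef a distinct type, so
 * mixing __wsum with a plain int without a __force cast warns;
 * under plain gcc the attributes compile away to nothing.
 */
#ifdef __CHECKER__
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int  __bitwise __wsum;		/* 32-bit unfolded checksum */
typedef unsigned short __bitwise __sum16;	/* 16-bit folded checksum */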
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86_64/lib/csum-partial.c	 7
-rw-r--r--	arch/x86_64/lib/csum-wrappers.c	37
2 files changed, 25 insertions(+), 19 deletions(-)
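
The "usual ntohs->shift" item refers to rewriting byte swaps of small host-order values as plain shifts elsewhere in this patch series (the diffstat above is filtered to arch/, so those hunks are not shown here). A hedged user-space illustration of the identity this relies on, valid on a little-endian machine such as x86_64:

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* For a value p < 256 on a little-endian host, htons(p) == p << 8,
 * so the swap of a small host-order value (e.g. an IP protocol
 * number) can be written as a shift, with no __be16 intermediate. */
int main(void)
{
	for (uint32_t p = 0; p < 256; p++)
		assert(htons((uint16_t)p) == (uint16_t)(p << 8));
	puts("ok");
	return 0;
}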
diff --git a/arch/x86_64/lib/csum-partial.c b/arch/x86_64/lib/csum-partial.c
index c493735218da..06ae630de82b 100644
--- a/arch/x86_64/lib/csum-partial.c
+++ b/arch/x86_64/lib/csum-partial.c
@@ -132,9 +132,10 @@ static __force_inline unsigned do_csum(const unsigned char *buff, unsigned len)
  *
  * it's best to have buff aligned on a 64-bit boundary
  */
-unsigned csum_partial(const unsigned char *buff, unsigned len, unsigned sum)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
 {
-	return add32_with_carry(do_csum(buff, len), sum);
+	return (__force __wsum)add32_with_carry(do_csum(buff, len),
+						(__force u32)sum);
 }
 
 EXPORT_SYMBOL(csum_partial);
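
csum_partial() now pushes both operands through add32_with_carry() with explicit __force casts. For readers without the header in front of them, this is a plausible portable C equivalent of that helper; the real x86_64 version is an addl/adcl pair in inline asm, so this sketch is an illustration, not the kernel's code:

/* 32-bit add with end-around carry: any carry out of bit 31 is
 * folded back into bit 0, as ones'-complement checksum arithmetic
 * requires. */
static inline unsigned int add32_with_carry(unsigned int a, unsigned int b)
{
	unsigned long long s = (unsigned long long)a + b;

	return (unsigned int)(s + (s >> 32));	/* s >> 32 is the carry bit */
}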
@@ -143,7 +144,7 @@ EXPORT_SYMBOL(csum_partial);
  * this routine is used for miscellaneous IP-like checksums, mainly
  * in icmp.c
  */
-unsigned short ip_compute_csum(unsigned char * buff, int len)
+__sum16 ip_compute_csum(const void *buff, int len)
 {
 	return csum_fold(csum_partial(buff,len,0));
 }
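
ip_compute_csum() now returns __sum16, the type csum_fold() produces when it collapses a 32-bit partial sum into the final checksum. Again a hedged, annotation-free sketch of what the fold does:

/* Collapse a 32-bit partial sum into the final 16-bit checksum:
 * fold the high half into the low half (twice, to absorb the carry
 * the first fold may generate), then take the ones' complement. */
static inline unsigned short csum_fold_sketch(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* first fold */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb carry */
	return (unsigned short)~sum;
}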
diff --git a/arch/x86_64/lib/csum-wrappers.c b/arch/x86_64/lib/csum-wrappers.c
index b1320ec58428..fd42a4a095fc 100644
--- a/arch/x86_64/lib/csum-wrappers.c
+++ b/arch/x86_64/lib/csum-wrappers.c
@@ -18,9 +18,9 @@
  * Returns an 32bit unfolded checksum of the buffer.
  * src and dst are best aligned to 64bits.
  */
-unsigned int
-csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
-			    int len, unsigned int isum, int *errp)
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+			    int len, __wsum isum, int *errp)
 {
 	might_sleep();
 	*errp = 0;
@@ -34,17 +34,19 @@ csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
 	if (unlikely((unsigned long)src & 6)) {
 		while (((unsigned long)src & 6) && len >= 2) {
 			__u16 val16;
-			*errp = __get_user(val16, (__u16 __user *)src);
+			*errp = __get_user(val16, (const __u16 __user *)src);
 			if (*errp)
 				return isum;
 			*(__u16 *)dst = val16;
-			isum = add32_with_carry(isum, val16);
+			isum = (__force __wsum)add32_with_carry(
+					(__force unsigned)isum, val16);
 			src += 2;
 			dst += 2;
 			len -= 2;
 		}
 	}
-	isum = csum_partial_copy_generic((__force void *)src,dst,len,isum,errp,NULL);
+	isum = csum_partial_copy_generic((__force const void *)src,
+				dst, len, isum, errp, NULL);
 	if (likely(*errp == 0))
 		return isum;
 }
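
The while loop in the hunk above peels off 16-bit words until the low alignment bits of src (mask 6) clear, so csum_partial_copy_generic() can run with 64-bit aligned loads, folding each half-word into the running sum as it goes. A self-contained user-space sketch of the same idiom (the function name and the memcpy-based load are assumptions here, not kernel code):

#include <string.h>

/* Fold 16-bit words into *sum until p is aligned for 64-bit loads
 * (assuming p starts at least 2-byte aligned), advancing p and
 * shrinking *len as we go. */
static const unsigned char *csum_align_head(const unsigned char *p,
					    int *len, unsigned int *sum)
{
	while (((unsigned long)p & 6) && *len >= 2) {
		unsigned short v;
		unsigned long long s;

		memcpy(&v, p, 2);		/* safe unaligned 16-bit load */
		s = (unsigned long long)*sum + v;
		*sum = (unsigned int)(s + (s >> 32));	/* end-around carry */
		p += 2;
		*len -= 2;
	}
	return p;
}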
@@ -66,9 +68,9 @@ EXPORT_SYMBOL(csum_partial_copy_from_user);
  * Returns an 32bit unfolded checksum of the buffer.
  * src and dst are best aligned to 64bits.
  */
-unsigned int
-csum_partial_copy_to_user(unsigned const char *src, unsigned char __user *dst,
-			  int len, unsigned int isum, int *errp)
+__wsum
+csum_partial_copy_to_user(const void *src, void __user *dst,
+			  int len, __wsum isum, int *errp)
 {
 	might_sleep();
 	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
@@ -79,7 +81,8 @@ csum_partial_copy_to_user(unsigned const char *src, unsigned char __user *dst,
 	if (unlikely((unsigned long)dst & 6)) {
 		while (((unsigned long)dst & 6) && len >= 2) {
 			__u16 val16 = *(__u16 *)src;
-			isum = add32_with_carry(isum, val16);
+			isum = (__force __wsum)add32_with_carry(
+					(__force unsigned)isum, val16);
 			*errp = __put_user(val16, (__u16 __user *)dst);
 			if (*errp)
 				return isum;
@@ -104,19 +107,21 @@ EXPORT_SYMBOL(csum_partial_copy_to_user);
  *
  * Returns an 32bit unfolded checksum of the buffer.
  */
-unsigned int
-csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, int len, unsigned int sum)
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
 	return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL);
 }
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 
-unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
-	__u32 len, unsigned short proto, unsigned int sum)
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+			const struct in6_addr *daddr,
+			__u32 len, unsigned short proto, __wsum sum)
 {
 	__u64 rest, sum64;
 
-	rest = (__u64)htonl(len) + (__u64)htons(proto) + (__u64)sum;
+	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
+		(__force __u64)sum;
 	asm(" addq (%[saddr]),%[sum]\n"
 	    " adcq 8(%[saddr]),%[sum]\n"
 	    " adcq (%[daddr]),%[sum]\n"
@@ -124,7 +129,7 @@ unsigned short csum_ipv6_magic(struct in6_addr *saddr, struct in6_addr *daddr,
 	    " adcq $0,%[sum]\n"
 	    : [sum] "=r" (sum64)
 	    : "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr));
-	return csum_fold(add32_with_carry(sum64 & 0xffffffff, sum64>>32));
+	return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
 }
 
 EXPORT_SYMBOL(csum_ipv6_magic);
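
For reference, the asm block in csum_ipv6_magic() chains addq/adcq across the two 128-bit addresses plus the rest word, then folds 64->32->16 bits and complements. A portable sketch of the same ones'-complement computation (an illustration only; the helper names are invented, and folding the carry per add rather than chaining it yields the same checksum value):

#include <stdint.h>
#include <string.h>

/* 64-bit add with end-around carry, mirroring one addq/adcq step. */
static inline uint64_t add64_with_carry(uint64_t a, uint64_t b)
{
	a += b;
	return a + (a < b);	/* fold the carry-out back into bit 0 */
}

/* Sum the 32 address bytes plus `rest` (length, protocol, and the
 * incoming partial sum), then fold 64->32->16 and complement. */
static uint16_t csum_ipv6_magic_sketch(const uint8_t saddr[16],
				       const uint8_t daddr[16],
				       uint64_t rest)
{
	uint64_t w[4], sum64 = rest;
	uint32_t sum32;

	memcpy(&w[0], saddr, 16);	/* unaligned-safe 64-bit loads */
	memcpy(&w[2], daddr, 16);
	for (int i = 0; i < 4; i++)
		sum64 = add64_with_carry(sum64, w[i]);

	sum32 = (uint32_t)sum64 + (uint32_t)(sum64 >> 32);
	sum32 += (sum32 < (uint32_t)sum64);		/* carry of 32-bit fold */
	sum32 = (sum32 & 0xffff) + (sum32 >> 16);	/* fold to 16 bits */
	sum32 = (sum32 & 0xffff) + (sum32 >> 16);	/* absorb carry */
	return (uint16_t)~sum32;
}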