author     Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>  2008-02-17 08:56:50 -0500
committer  Ingo Molnar <mingo@elte.hu>                    2008-02-19 10:18:32 -0500
commit     0df025b709ae09081e21545761a249ec2d969689 (patch)
tree       c8be44ee7518981fd9a9f2327d0ec25f205a182b /arch/x86/lib/csum-wrappers_64.c
parent     4b44f810166fb83ad1a817ee599006a7157ee54c (diff)
x86: coding style fixes in arch/x86/lib/csum-wrappers_64.c
no code changed:
arch/x86/lib/csum-wrappers_64.o:
   text    data     bss     dec     hex  filename
    839       0       0     839     347  csum-wrappers_64.o.before
    839       0       0     839     347  csum-wrappers_64.o.after
md5:
b31994226c33e0b52bef5a0e110b84b0 csum-wrappers_64.o.before.asm
b31994226c33e0b52bef5a0e110b84b0 csum-wrappers_64.o.after.asm
Signed-off-by: Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/lib/csum-wrappers_64.c')
 -rw-r--r--  arch/x86/lib/csum-wrappers_64.c  |  80
 1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index fd42a4a095fc..95e45dcc5a29 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -1,117 +1,117 @@
 /* Copyright 2002,2003 Andi Kleen, SuSE Labs.
  * Subject to the GNU Public License v.2
  *
  * Wrappers of assembly checksum functions for x86-64.
  */
 
 #include <asm/checksum.h>
 #include <linux/module.h>
 
 /**
  * csum_partial_copy_from_user - Copy and checksum from user space.
  * @src: source address (user space)
  * @dst: destination address
  * @len: number of bytes to be copied.
  * @isum: initial sum that is added into the result (32bit unfolded)
  * @errp: set to -EFAULT for an bad source address.
  *
  * Returns an 32bit unfolded checksum of the buffer.
  * src and dst are best aligned to 64bits.
  */
 __wsum
 csum_partial_copy_from_user(const void __user *src, void *dst,
 			    int len, __wsum isum, int *errp)
 {
 	might_sleep();
 	*errp = 0;
-	if (likely(access_ok(VERIFY_READ,src, len))) {
+	if (likely(access_ok(VERIFY_READ, src, len))) {
 		/* Why 6, not 7? To handle odd addresses aligned we
 		   would need to do considerable complications to fix the
 		   checksum which is defined as an 16bit accumulator. The
 		   fix alignment code is primarily for performance
 		   compatibility with 32bit and that will handle odd
 		   addresses slowly too. */
 		if (unlikely((unsigned long)src & 6)) {
 			while (((unsigned long)src & 6) && len >= 2) {
 				__u16 val16;
 				*errp = __get_user(val16, (const __u16 __user *)src);
 				if (*errp)
 					return isum;
 				*(__u16 *)dst = val16;
 				isum = (__force __wsum)add32_with_carry(
 						(__force unsigned)isum, val16);
 				src += 2;
 				dst += 2;
 				len -= 2;
 			}
 		}
 		isum = csum_partial_copy_generic((__force const void *)src,
 					dst, len, isum, errp, NULL);
 		if (likely(*errp == 0))
 			return isum;
 	}
 	*errp = -EFAULT;
-	memset(dst,0,len);
+	memset(dst, 0, len);
 	return isum;
 }
 
 EXPORT_SYMBOL(csum_partial_copy_from_user);
 
 /**
  * csum_partial_copy_to_user - Copy and checksum to user space.
  * @src: source address
  * @dst: destination address (user space)
  * @len: number of bytes to be copied.
  * @isum: initial sum that is added into the result (32bit unfolded)
  * @errp: set to -EFAULT for an bad destination address.
  *
  * Returns an 32bit unfolded checksum of the buffer.
  * src and dst are best aligned to 64bits.
  */
 __wsum
 csum_partial_copy_to_user(const void *src, void __user *dst,
 			  int len, __wsum isum, int *errp)
 {
 	might_sleep();
 	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
 		*errp = -EFAULT;
 		return 0;
 	}
 
 	if (unlikely((unsigned long)dst & 6)) {
 		while (((unsigned long)dst & 6) && len >= 2) {
 			__u16 val16 = *(__u16 *)src;
 			isum = (__force __wsum)add32_with_carry(
 					(__force unsigned)isum, val16);
 			*errp = __put_user(val16, (__u16 __user *)dst);
 			if (*errp)
 				return isum;
 			src += 2;
 			dst += 2;
 			len -= 2;
 		}
 	}
 
 	*errp = 0;
-	return csum_partial_copy_generic(src, (void __force *)dst,len,isum,NULL,errp);
+	return csum_partial_copy_generic(src, (void __force *)dst, len, isum, NULL, errp);
 }
 
 EXPORT_SYMBOL(csum_partial_copy_to_user);
 
 /**
  * csum_partial_copy_nocheck - Copy and checksum.
  * @src: source address
  * @dst: destination address
  * @len: number of bytes to be copied.
  * @isum: initial sum that is added into the result (32bit unfolded)
  *
  * Returns an 32bit unfolded checksum of the buffer.
  */
 __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
-	return csum_partial_copy_generic(src,dst,len,sum,NULL,NULL);
+	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 
 __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
@@ -119,16 +119,16 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 			__u32 len, unsigned short proto, __wsum sum)
 {
 	__u64 rest, sum64;
 
 	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
 		(__force __u64)sum;
 	asm(" addq (%[saddr]),%[sum]\n"
 	    " adcq 8(%[saddr]),%[sum]\n"
 	    " adcq (%[daddr]),%[sum]\n"
 	    " adcq 8(%[daddr]),%[sum]\n"
 	    " adcq $0,%[sum]\n"
 	    : [sum] "=r" (sum64)
-	    : "[sum]" (rest),[saddr] "r" (saddr), [daddr] "r" (daddr));
+	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));
 	return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
 }
 