author		Ingo Molnar <mingo@elte.hu>	2008-02-17 10:48:25 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-02-19 10:18:32 -0500
commit		d76c1ae4d1f4f322d47e7c6e47a277384ba9d9cb (patch)
tree		d9f10bf4d290375b2db8e7c93c55b42176e53b3d /arch/x86/lib
parent		0df025b709ae09081e21545761a249ec2d969689 (diff)
x86: clean up csum-wrappers_64.c some more
no code changed:

arch/x86/lib/csum-wrappers_64.o:

   text    data     bss     dec     hex  filename
    839       0       0     839     347  csum-wrappers_64.o.before
    839       0       0     839     347  csum-wrappers_64.o.after

md5:
   b31994226c33e0b52bef5a0e110b84b0  csum-wrappers_64.o.before.asm
   b31994226c33e0b52bef5a0e110b84b0  csum-wrappers_64.o.after.asm

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--	arch/x86/lib/csum-wrappers_64.c	87
1 file changed, 51 insertions, 36 deletions
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 95e45dcc5a29..459b58a8a15c 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -1,9 +1,9 @@
-/* Copyright 2002,2003 Andi Kleen, SuSE Labs.
+/*
+ * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
  * Subject to the GNU Public License v.2
  *
  * Wrappers of assembly checksum functions for x86-64.
  */
-
 #include <asm/checksum.h>
 #include <linux/module.h>
 
@@ -24,37 +24,47 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
 {
 	might_sleep();
 	*errp = 0;
-	if (likely(access_ok(VERIFY_READ, src, len))) {
-		/* Why 6, not 7? To handle odd addresses aligned we
-		   would need to do considerable complications to fix the
-		   checksum which is defined as an 16bit accumulator. The
-		   fix alignment code is primarily for performance
-		   compatibility with 32bit and that will handle odd
-		   addresses slowly too. */
-		if (unlikely((unsigned long)src & 6)) {
-			while (((unsigned long)src & 6) && len >= 2) {
-				__u16 val16;
-				*errp = __get_user(val16, (const __u16 __user *)src);
-				if (*errp)
-					return isum;
-				*(__u16 *)dst = val16;
-				isum = (__force __wsum)add32_with_carry(
-						(__force unsigned)isum, val16);
-				src += 2;
-				dst += 2;
-				len -= 2;
-			}
+
+	if (!likely(access_ok(VERIFY_READ, src, len)))
+		goto out_err;
+
+	/*
+	 * Why 6, not 7? To handle odd addresses aligned we
+	 * would need to do considerable complications to fix the
+	 * checksum which is defined as an 16bit accumulator. The
+	 * fix alignment code is primarily for performance
+	 * compatibility with 32bit and that will handle odd
+	 * addresses slowly too.
+	 */
+	if (unlikely((unsigned long)src & 6)) {
+		while (((unsigned long)src & 6) && len >= 2) {
+			__u16 val16;
+
+			*errp = __get_user(val16, (const __u16 __user *)src);
+			if (*errp)
+				return isum;
+
+			*(__u16 *)dst = val16;
+			isum = (__force __wsum)add32_with_carry(
+					(__force unsigned)isum, val16);
+			src += 2;
+			dst += 2;
+			len -= 2;
 		}
-		isum = csum_partial_copy_generic((__force const void *)src,
-					dst, len, isum, errp, NULL);
-		if (likely(*errp == 0))
-			return isum;
 	}
+	isum = csum_partial_copy_generic((__force const void *)src,
+				dst, len, isum, errp, NULL);
+	if (unlikely(*errp))
+		goto out_err;
+
+	return isum;
+
+out_err:
 	*errp = -EFAULT;
 	memset(dst, 0, len);
+
 	return isum;
 }
-
 EXPORT_SYMBOL(csum_partial_copy_from_user);
 
 /**
@@ -73,6 +83,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
 			   int len, __wsum isum, int *errp)
 {
 	might_sleep();
+
 	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
 		*errp = -EFAULT;
 		return 0;
@@ -81,6 +92,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
 	if (unlikely((unsigned long)dst & 6)) {
 		while (((unsigned long)dst & 6) && len >= 2) {
 			__u16 val16 = *(__u16 *)src;
+
 			isum = (__force __wsum)add32_with_carry(
 					(__force unsigned)isum, val16);
 			*errp = __put_user(val16, (__u16 __user *)dst);
@@ -93,9 +105,9 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
 	}
 
 	*errp = 0;
-	return csum_partial_copy_generic(src, (void __force *)dst, len, isum, NULL, errp);
+	return csum_partial_copy_generic(src, (void __force *)dst,
+					 len, isum, NULL, errp);
 }
-
 EXPORT_SYMBOL(csum_partial_copy_to_user);
 
 /**
@@ -122,14 +134,17 @@ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 
 	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
 		(__force __u64)sum;
-	asm("	addq (%[saddr]),%[sum]\n"
-	    "	adcq 8(%[saddr]),%[sum]\n"
-	    "	adcq (%[daddr]),%[sum]\n"
-	    "	adcq 8(%[daddr]),%[sum]\n"
-	    "	adcq $0,%[sum]\n"
+
+	asm("	addq (%[saddr]),%[sum]\n"
+	    "	adcq 8(%[saddr]),%[sum]\n"
+	    "	adcq (%[daddr]),%[sum]\n"
+	    "	adcq 8(%[daddr]),%[sum]\n"
+	    "	adcq $0,%[sum]\n"
+
 	    : [sum] "=r" (sum64)
 	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));
-	return csum_fold((__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
-}
 
+	return csum_fold(
+	       (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
+}
 EXPORT_SYMBOL(csum_ipv6_magic);
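
A note on the arithmetic the wrappers above rely on: the odd-address loop in csum_partial_copy_from_user() folds each 16-bit word it copies into the 32-bit running checksum via add32_with_carry(). The "& 6" test advances src two bytes at a time toward an 8-byte boundary; bit 0 is deliberately left untouched because, as the in-code comment explains, odd byte offsets would require awkward rotation of the 16-bit accumulator. The snippet below is a minimal userspace sketch of that folding step; add32_with_carry_model() is a stand-in written for illustration, not the kernel's asm helper.

/* Stand-in model of folding a 16-bit word into the 32-bit checksum
 * accumulator with an end-around carry, mirroring what the wrapper
 * does with add32_with_carry(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t add32_with_carry_model(uint32_t a, uint32_t b)
{
	uint64_t s = (uint64_t)a + b;			/* widen so the carry is visible */
	return (uint32_t)s + (uint32_t)(s >> 32);	/* fold the carry back in */
}

int main(void)
{
	uint32_t isum = 0xffffffffu;	/* running checksum */
	uint16_t val16 = 0x0002;	/* 16-bit word just copied */

	/* the wrapper's per-word step: isum = add32_with_carry(isum, val16) */
	isum = add32_with_carry_model(isum, val16);
	printf("%08x\n", (unsigned)isum);	/* prints 00000002: the carry wrapped around */
	return 0;
}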
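Similarly, the csum_ipv6_magic() asm chains addq/adcq so that every carry out of the 64-bit accumulator is re-injected by the following adcq (and the trailing adcq $0). The C sketch below is a rough model of the same computation, written only for intuition: it folds each carry immediately rather than carrying into the next add, which yields an equivalent ones'-complement sum but is not a drop-in replacement for the kernel routine.

/* Hypothetical userspace model of the IPv6 pseudo-header checksum the
 * asm block computes: accumulate saddr, daddr and 'rest' with end-around
 * carries, then fold 64 -> 32 -> 16 bits and take the complement. */
#include <stdint.h>

static uint64_t add64_fold(uint64_t a, uint64_t b)
{
	uint64_t s = a + b;
	return s + (s < a);		/* re-inject the carry, as adcq would */
}

uint16_t ipv6_csum_model(const uint64_t saddr[2], const uint64_t daddr[2],
			 uint64_t rest)
{
	uint64_t sum = rest;
	uint32_t sum32;

	sum = add64_fold(sum, saddr[0]);	/* addq  (%[saddr]),%[sum] */
	sum = add64_fold(sum, saddr[1]);	/* adcq 8(%[saddr]),%[sum] */
	sum = add64_fold(sum, daddr[0]);	/* adcq  (%[daddr]),%[sum] */
	sum = add64_fold(sum, daddr[1]);	/* adcq 8(%[daddr]),%[sum] */

	/* add32_with_carry(sum & 0xffffffff, sum >> 32) */
	sum32 = (uint32_t)sum + (uint32_t)(sum >> 32);
	if (sum32 < (uint32_t)sum)
		sum32++;

	/* csum_fold(): collapse to 16 bits and return the complement */
	sum32 = (sum32 & 0xffff) + (sum32 >> 16);
	sum32 = (sum32 & 0xffff) + (sum32 >> 16);
	return (uint16_t)~sum32;
}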