author     Sam Ravnborg <sam@ravnborg.org>        2014-05-16 17:25:50 -0400
committer  David S. Miller <davem@davemloft.net>  2014-05-18 22:01:29 -0400
commit     f05a68653e56ca2f23bccf7e50be69486886f052
tree       c312c5d8fe927962ba365d85cd43de6592bd6642 /arch/sparc/include/asm/checksum_64.h
parent     77e39a79f36ece60769787a33fe5ae0b8b4621ba
sparc: drop use of extern for prototypes in arch/sparc/include/asm
Drop extern for all prototypes and adjust alignment of parameters
as required after the removal.
In a few rare cases adjust line length to conform to the maximum of 80 chars,
and likewise in a few rare cases adjust alignment of parameters
to static functions.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
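
For illustration, a minimal before/after sketch of the transformation this
commit applies; the prototype below is hypothetical, not taken from the
patched file. In C, extern is implicit on function declarations, so removing
it changes no semantics, but continuation lines must be re-indented because
the opening parenthesis shifts left:

struct widget;  /* hypothetical type, for illustration only */

/* Before: extern is redundant on a function prototype. */
extern int frob_widget(const struct widget *w, int flags,
                       unsigned long timeout);

/* After: same declaration; parameters re-aligned to the '('. */
int frob_widget(const struct widget *w, int flags,
                unsigned long timeout);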
Diffstat (limited to 'arch/sparc/include/asm/checksum_64.h')
 arch/sparc/include/asm/checksum_64.h | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h
index 019b9615e43c..b3c8b9ded364 100644
--- a/arch/sparc/include/asm/checksum_64.h
+++ b/arch/sparc/include/asm/checksum_64.h
@@ -29,7 +29,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern __wsum csum_partial(const void * buff, int len, __wsum sum);
+__wsum csum_partial(const void * buff, int len, __wsum sum);
 
 /* the same as csum_partial, but copies from user space while it
  * checksums
@@ -37,12 +37,12 @@ extern __wsum csum_partial(const void * buff, int len, __wsum sum);
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
-                                        int len, __wsum sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+                                 int len, __wsum sum);
 
-extern long __csum_partial_copy_from_user(const void __user *src,
-                                          void *dst, int len,
-                                          __wsum sum);
+long __csum_partial_copy_from_user(const void __user *src,
+                                   void *dst, int len,
+                                   __wsum sum);
 
 static inline __wsum
 csum_partial_copy_from_user(const void __user *src,
@@ -59,9 +59,9 @@ csum_partial_copy_from_user(const void __user *src,
  * Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-extern long __csum_partial_copy_to_user(const void *src,
-                                        void __user *dst, int len,
-                                        __wsum sum);
+long __csum_partial_copy_to_user(const void *src,
+                                 void __user *dst, int len,
+                                 __wsum sum);
 
 static inline __wsum
 csum_and_copy_to_user(const void *src,
@@ -77,7 +77,7 @@ csum_and_copy_to_user(const void *src,
 /* ihl is always 5 or greater, almost always is 5, and iph is word aligned
  * the majority of the time.
  */
-extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /* Fold a partial checksum without adding pseudo headers. */
 static inline __sum16 csum_fold(__wsum sum)
@@ -96,9 +96,9 @@ static inline __sum16 csum_fold(__wsum sum)
 }
 
 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
-                                        unsigned int len,
-                                        unsigned short proto,
-                                        __wsum sum)
+                                         unsigned int len,
+                                         unsigned short proto,
+                                         __wsum sum)
 {
         __asm__ __volatile__(
 "        addcc        %1, %0, %0\n"
@@ -116,9 +116,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
  * returns a 16-bit checksum, already complemented
  */
 static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-                                        unsigned short len,
-                                        unsigned short proto,
-                                        __wsum sum)
+                                         unsigned short len,
+                                         unsigned short proto,
+                                         __wsum sum)
 {
         return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
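
For background on the checksum arithmetic in the routines above, here is a
portable C sketch; it is my illustration, not part of the patch (the kernel
header implements these with sparc inline assembly). It assumes standard
Internet-checksum one's-complement arithmetic: carries wrap around on
addition, as in the carry-chained adds of csum_tcpudp_nofold() (the first,
addcc, is visible in the hunk above), and csum_fold() reduces the 32-bit
partial sum to a complemented 16-bit result:

#include <stdint.h>
#include <stdio.h>

/* One's-complement 32-bit add: a carry out of bit 31 wraps back
 * into bit 0 (end-around carry). */
static uint32_t csum_add(uint32_t a, uint32_t b)
{
        uint32_t s = a + b;
        return s + (s < a);     /* add the carry back in */
}

/* Fold a 32-bit partial checksum to 16 bits and complement it,
 * mirroring what csum_fold() computes. */
static uint16_t fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);     /* fold high half in */
        sum = (sum & 0xffff) + (sum >> 16);     /* absorb new carry */
        return (uint16_t)~sum;
}

int main(void)
{
        uint32_t sum = csum_add(0xffff0001u, 0x00020003u);
        printf("sum=%#010x folded=%#06x\n",
               (unsigned)sum, (unsigned)fold(sum));
        return 0;
}

Run standalone, this prints sum=0x00010005 folded=0xfff9: the overflow from
the first add wraps around, and the fold complements the 16-bit result,
matching the "already complemented" note on csum_tcpudp_magic() above.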