author    Linus Torvalds <torvalds@linux-foundation.org>  2012-12-19 10:52:48 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-12-19 10:52:48 -0500
commit    7f2de8171ddf28fdb2ca7f9a683ee1207849f718 (patch)
tree      d89da981ac762de3fd32e1c08ddc8041f3c37519
parent    59771079c18c44e39106f0f30054025acafadb41 (diff)
parent    cf66bb93e0f75e0a4ba1ec070692618fa028e994 (diff)
Merge tag 'byteswap-for-linus-20121219' of git://git.infradead.org/users/dwmw2/byteswap
Pull preparatory gcc intrinsics bswap patch from David Woodhouse:
"This single patch is effectively a no-op for now. It enables
architectures to opt in to using GCC's __builtin_bswapXX() intrinsics
for byteswapping, and if we merge this now then the architecture
maintainers can enable it for their arch during the next cycle without
dependency issues.
It's worth making it a per-arch opt-in, because although in *theory*
the compiler should never do worse than hand-coded assembler (and of
course it also ought to do a lot better on platforms like Atom and
PowerPC which have load-and-swap or store-and-swap instructions), that
isn't always the case. See
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=46453
for example."
* tag 'byteswap-for-linus-20121219' of git://git.infradead.org/users/dwmw2/byteswap:
byteorder: allow arch to opt to use GCC intrinsics for byteswapping
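
For readers unfamiliar with the intrinsics the message refers to, here is a
minimal, standalone userspace sketch (illustration only, not part of this
merge) of what __builtin_bswap32() does. swab32_by_hand() is a hypothetical
helper that mirrors the kernel's open-coded ___constant_swab32() pattern;
the builtin expresses the same operation in a form the optimiser can
recognise directly rather than having to pattern-match the shifts and masks.

  /*
   * Minimal userspace sketch (not from this merge): __builtin_bswap32()
   * versus an open-coded swap.  Compile with "gcc -O2 -S" to compare
   * the generated code.
   */
  #include <stdint.h>
  #include <stdio.h>

  static uint32_t swab32_by_hand(uint32_t x)    /* hypothetical helper */
  {
          return ((x & 0x000000ffu) << 24) |
                 ((x & 0x0000ff00u) <<  8) |
                 ((x & 0x00ff0000u) >>  8) |
                 ((x & 0xff000000u) >> 24);
  }

  int main(void)
  {
          uint32_t v = 0x12345678u;

          printf("builtin: %08x\n", __builtin_bswap32(v));  /* 78563412 */
          printf("by hand: %08x\n", swab32_by_hand(v));     /* 78563412 */
          return 0;
  }

Both calls must produce the same value; the difference that motivates the
patch is in the code the compiler is able to emit for them.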
-rw-r--r--  arch/Kconfig                    | 19
-rw-r--r--  include/linux/compiler-gcc4.h   | 10
-rw-r--r--  include/linux/compiler-intel.h  |  7
-rw-r--r--  include/uapi/linux/swab.h       | 12
4 files changed, 45 insertions, 3 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 54ffd0f9df21..8e9e3246b2b4 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -113,6 +113,25 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS
 	  See Documentation/unaligned-memory-access.txt for more
 	  information on the topic of unaligned memory accesses.
 
+config ARCH_USE_BUILTIN_BSWAP
+	bool
+	help
+	  Modern versions of GCC (since 4.4) have builtin functions
+	  for handling byte-swapping. Using these, instead of the old
+	  inline assembler that the architecture code provides in the
+	  __arch_bswapXX() macros, allows the compiler to see what's
+	  happening and offers more opportunity for optimisation. In
+	  particular, the compiler will be able to combine the byteswap
+	  with a nearby load or store and use load-and-swap or
+	  store-and-swap instructions if the architecture has them. It
+	  should almost *never* result in code which is worse than the
+	  hand-coded assembler in <asm/swab.h>. But just in case it
+	  does, the use of the builtins is optional.
+
+	  Any architecture with load-and-swap or store-and-swap
+	  instructions should set this. And it shouldn't hurt to set it
+	  on architectures that don't have such instructions.
+
 config HAVE_SYSCALL_WRAPPERS
 	bool
 
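
To make the load-and-swap point in the help text above concrete, here is a
small hedged sketch (not part of the patch). load_swapped32() is a
hypothetical helper that reads a 32-bit value stored in the opposite byte
order to the CPU. Because the swap is a builtin rather than opaque inline
assembler, a compiler targeting a CPU with a load-and-swap instruction
(movbe on Atom, lwbrx on PowerPC) may fuse the load and the swap into a
single instruction; whether it actually does depends on the compiler
version, target and flags.

  /*
   * Illustrative sketch (not from the patch): a foreign-endian load.
   * With the builtin visible to the compiler, the load and the byte
   * swap can be combined into one load-and-swap instruction where the
   * target provides one.
   */
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  static uint32_t load_swapped32(const void *p)   /* hypothetical helper */
  {
          uint32_t v;

          memcpy(&v, p, sizeof(v));       /* unaligned-safe load */
          return __builtin_bswap32(v);    /* candidate for movbe/lwbrx fusion */
  }

  int main(void)
  {
          static const unsigned char buf[4] = { 0x12, 0x34, 0x56, 0x78 };

          /* On a little-endian host this prints 12345678. */
          printf("%08x\n", load_swapped32(buf));
          return 0;
  }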
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 412bc6c2b023..dc16a858e77c 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -63,3 +63,13 @@
 #define __compiletime_warning(message) __attribute__((warning(message)))
 #define __compiletime_error(message) __attribute__((error(message)))
 #endif
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if __GNUC_MINOR__ >= 4
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index d8e636e5607d..973ce10c40b6 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -29,3 +29,10 @@
 #endif
 
 #define uninitialized_var(x) x
+
+#ifndef __HAVE_BUILTIN_BSWAP16__
+/* icc has this, but it's called _bswap16 */
+#define __HAVE_BUILTIN_BSWAP16__
+#define __builtin_bswap16 _bswap16
+#endif
+
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index e811474724c2..0e011eb91b5d 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -45,7 +45,9 @@
 
 static inline __attribute_const__ __u16 __fswab16(__u16 val)
 {
-#ifdef __arch_swab16
+#ifdef __HAVE_BUILTIN_BSWAP16__
+	return __builtin_bswap16(val);
+#elif defined (__arch_swab16)
 	return __arch_swab16(val);
 #else
 	return ___constant_swab16(val);
@@ -54,7 +56,9 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
 
 static inline __attribute_const__ __u32 __fswab32(__u32 val)
 {
-#ifdef __arch_swab32
+#ifdef __HAVE_BUILTIN_BSWAP32__
+	return __builtin_bswap32(val);
+#elif defined(__arch_swab32)
 	return __arch_swab32(val);
 #else
 	return ___constant_swab32(val);
@@ -63,7 +67,9 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
 
 static inline __attribute_const__ __u64 __fswab64(__u64 val)
 {
-#ifdef __arch_swab64
+#ifdef __HAVE_BUILTIN_BSWAP64__
+	return __builtin_bswap64(val);
+#elif defined (__arch_swab64)
 	return __arch_swab64(val);
 #elif defined(__SWAB_64_THRU_32__)
 	__u32 h = val >> 32;