author	David Woodhouse <David.Woodhouse@intel.com>	2012-12-03 11:25:40 -0500
committer	David Woodhouse <David.Woodhouse@intel.com>	2012-12-05 20:22:31 -0500
commit	cf66bb93e0f75e0a4ba1ec070692618fa028e994 (patch)
tree	0ae48658adb29f50bdd85a94cbb84670a234f441
parent	27d7c2a006a81c04fab00b8cd81b99af3b32738d (diff)
byteorder: allow arch to opt to use GCC intrinsics for byteswapping
Since GCC 4.4, there have been __builtin_bswap32() and __builtin_bswap64() intrinsics. A __builtin_bswap16() came a little later (4.6 for PowerPC, 4.8 for other platforms).

By using these instead of the inline assembler that most architectures have in their __arch_swabXX() macros, we let the compiler see what's actually happening. The resulting code should be at least as good, and much *better* in the cases where it can be combined with a nearby load or store, using a load-and-byteswap or store-and-byteswap instruction (e.g. lwbrx/stwbrx on PowerPC, movbe on Atom).

When GCC is sufficiently recent *and* the architecture opts in to using the intrinsics by setting CONFIG_ARCH_USE_BUILTIN_BSWAP, they will be used in preference to the __arch_swabXX() macros. An architecture which does not set ARCH_USE_BUILTIN_BSWAP will continue to use its own hand-crafted macros.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
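For illustration, a minimal user-space sketch of what the intrinsic enables (not part of this patch; the helper name load_be32() is hypothetical):

    #include <stdint.h>

    /*
     * Read a big-endian 32-bit field on a little-endian host.  Because
     * the compiler can see through __builtin_bswap32(), it is free to
     * fold the swap into the load itself (lwbrx on PowerPC, movbe on
     * Atom); an opaque inline-assembler swab would prevent that.
     */
    static inline uint32_t load_be32(const uint32_t *p)
    {
    	return __builtin_bswap32(*p);
    }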
-rw-r--r--	arch/Kconfig	19
-rw-r--r--	include/linux/compiler-gcc4.h	10
-rw-r--r--	include/linux/compiler-intel.h	7
-rw-r--r--	include/uapi/linux/swab.h	12
4 files changed, 45 insertions(+), 3 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 366ec06a5185..c31416b10586 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -112,6 +112,25 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS
 	  See Documentation/unaligned-memory-access.txt for more
 	  information on the topic of unaligned memory accesses.
 
+config ARCH_USE_BUILTIN_BSWAP
+	bool
+	help
+	  Modern versions of GCC (since 4.4) have builtin functions
+	  for handling byte-swapping. Using these, instead of the old
+	  inline assembler that the architecture code provides in the
+	  __arch_bswapXX() macros, allows the compiler to see what's
+	  happening and offers more opportunity for optimisation. In
+	  particular, the compiler will be able to combine the byteswap
+	  with a nearby load or store and use load-and-swap or
+	  store-and-swap instructions if the architecture has them. It
+	  should almost *never* result in code which is worse than the
+	  hand-coded assembler in <asm/swab.h>. But just in case it
+	  does, the use of the builtins is optional.
+
+	  Any architecture with load-and-swap or store-and-swap
+	  instructions should set this. And it shouldn't hurt to set it
+	  on architectures that don't have such instructions.
+
 config HAVE_SYSCALL_WRAPPERS
 	bool
 
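An architecture opts in by selecting the new symbol from its own Kconfig entry, roughly like this (a hypothetical sketch; the patch itself adds no select):

    config PPC
    	bool
    	default y
    	select ARCH_USE_BUILTIN_BSWAP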
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 412bc6c2b023..dc16a858e77c 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -63,3 +63,13 @@
 #define __compiletime_warning(message) __attribute__((warning(message)))
 #define __compiletime_error(message) __attribute__((error(message)))
 #endif
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if __GNUC_MINOR__ >= 4
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif
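Since compiler-gcc4.h is only ever included when __GNUC__ == 4, the __GNUC_MINOR__ tests above amount to full version checks. Spelled out stand-alone (illustrative only), the same gating would read:

    /* __builtin_bswap32/64: GCC 4.4 or later */
    #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)
    #define __HAVE_BUILTIN_BSWAP32__
    #define __HAVE_BUILTIN_BSWAP64__
    #endif

    /* __builtin_bswap16: GCC 4.8, or GCC 4.6 on PowerPC */
    #if __GNUC__ > 4 || (__GNUC__ == 4 && \
    	(__GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6)))
    #define __HAVE_BUILTIN_BSWAP16__
    #endif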
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index d8e636e5607d..973ce10c40b6 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -29,3 +29,10 @@
 #endif
 
 #define uninitialized_var(x) x
+
+#ifndef __HAVE_BUILTIN_BSWAP16__
+/* icc has this, but it's called _bswap16 */
+#define __HAVE_BUILTIN_BSWAP16__
+#define __builtin_bswap16 _bswap16
+#endif
+
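With that remap in place, common code can call __builtin_bswap16() under icc as well; e.g. (illustrative):

    uint16_t x = __builtin_bswap16(0x1234);	/* icc compiles this as _bswap16(0x1234) */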
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index e811474724c2..0e011eb91b5d 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -45,7 +45,9 @@
 
 static inline __attribute_const__ __u16 __fswab16(__u16 val)
 {
-#ifdef __arch_swab16
+#ifdef __HAVE_BUILTIN_BSWAP16__
+	return __builtin_bswap16(val);
+#elif defined (__arch_swab16)
 	return __arch_swab16(val);
 #else
 	return ___constant_swab16(val);
@@ -54,7 +56,9 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
 
 static inline __attribute_const__ __u32 __fswab32(__u32 val)
 {
-#ifdef __arch_swab32
+#ifdef __HAVE_BUILTIN_BSWAP32__
+	return __builtin_bswap32(val);
+#elif defined(__arch_swab32)
 	return __arch_swab32(val);
 #else
 	return ___constant_swab32(val);
@@ -63,7 +67,9 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
 
 static inline __attribute_const__ __u64 __fswab64(__u64 val)
 {
-#ifdef __arch_swab64
+#ifdef __HAVE_BUILTIN_BSWAP64__
+	return __builtin_bswap64(val);
+#elif defined (__arch_swab64)
 	return __arch_swab64(val);
 #elif defined(__SWAB_64_THRU_32__)
 	__u32 h = val >> 32;
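For reference, the same three-way dispatch that __fswab32() now performs, as a stand-alone sketch (user-space C; my_swab32() is a hypothetical name, and the final branch open-codes what ___constant_swab32() expands to):

    #include <stdint.h>

    static inline uint32_t my_swab32(uint32_t val)
    {
    #ifdef __HAVE_BUILTIN_BSWAP32__		/* set by compiler-gcc4.h */
    	return __builtin_bswap32(val);
    #elif defined(__arch_swab32)		/* arch inline assembler */
    	return __arch_swab32(val);
    #else					/* mirrors ___constant_swab32() */
    	return ((val & 0x000000ffUL) << 24) |
    	       ((val & 0x0000ff00UL) <<  8) |
    	       ((val & 0x00ff0000UL) >>  8) |
    	       ((val & 0xff000000UL) >> 24);
    #endif
    }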