diff options
| author | Denys Vlasenko <dvlasenk@redhat.com> | 2016-03-17 17:22:47 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-17 18:09:34 -0400 |
| commit | e3bde9568d992c5f985e6e30731a5f9f9bef7b13 (patch) | |
| tree | 861438397ca7cbedeab03b3c2ba995b1d437a728 /include/linux/unaligned | |
| parent | bc27fb68aaad44dd8f5c34924f05721f0abaeec1 (diff) | |
include/linux/unaligned: force inlining of byteswap operations
Sometimes gcc mysteriously doesn't inline
very small functions we expect to be inlined. See
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66122
With this .config:
http://busybox.net/~vda/kernel_config_OPTIMIZE_INLINING_and_Os,
the following functions get deinlined many times.
Examples of disassembly:
<get_unaligned_be16> (24 copies, 108 calls):
66 8b 07 mov (%rdi),%ax
55 push %rbp
48 89 e5 mov %rsp,%rbp
86 e0 xchg %ah,%al
5d pop %rbp
c3 retq
<get_unaligned_be32> (25 copies, 181 calls):
8b 07 mov (%rdi),%eax
55 push %rbp
48 89 e5 mov %rsp,%rbp
0f c8 bswap %eax
5d pop %rbp
c3 retq
<get_unaligned_be64> (23 copies, 94 calls):
48 8b 07 mov (%rdi),%rax
55 push %rbp
48 89 e5 mov %rsp,%rbp
48 0f c8 bswap %rax
5d pop %rbp
c3 retq
<put_unaligned_be16> (2 copies, 11 calls):
89 f8 mov %edi,%eax
55 push %rbp
c1 ef 08 shr $0x8,%edi
c1 e0 08 shl $0x8,%eax
09 c7 or %eax,%edi
48 89 e5 mov %rsp,%rbp
66 89 3e mov %di,(%rsi)
<put_unaligned_be32> (8 copies, 43 calls):
55 push %rbp
0f cf bswap %edi
89 3e mov %edi,(%rsi)
48 89 e5 mov %rsp,%rbp
5d pop %rbp
c3 retq
<put_unaligned_be64> (26 copies, 157 calls):
55 push %rbp
48 0f cf bswap %rdi
48 89 3e mov %rdi,(%rsi)
48 89 e5 mov %rsp,%rbp
5d pop %rbp
c3 retq
This patch fixes this via s/inline/__always_inline/.
It only affects arches with efficient unaligned access insns, such as x86.
(arches which lack such ops do not include linux/unaligned/access_ok.h)
Code size decrease after the patch is ~8.5k:
text data bss dec hex filename
92197848 20826112 36417536 149441496 8e84bd8 vmlinux
92189231 20826144 36417536 149432911 8e82a4f vmlinux6_unaligned_be_after
Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Graf <tgraf@suug.ch>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/unaligned')
| -rw-r--r-- | include/linux/unaligned/access_ok.h | 24 |
1 files changed, 12 insertions, 12 deletions
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h index 99c1b4d20b0f..33383ca23837 100644 --- a/include/linux/unaligned/access_ok.h +++ b/include/linux/unaligned/access_ok.h | |||
| @@ -4,62 +4,62 @@ | |||
| 4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
| 5 | #include <asm/byteorder.h> | 5 | #include <asm/byteorder.h> |
| 6 | 6 | ||
| 7 | static inline u16 get_unaligned_le16(const void *p) | 7 | static __always_inline u16 get_unaligned_le16(const void *p) |
| 8 | { | 8 | { |
| 9 | return le16_to_cpup((__le16 *)p); | 9 | return le16_to_cpup((__le16 *)p); |
| 10 | } | 10 | } |
| 11 | 11 | ||
| 12 | static inline u32 get_unaligned_le32(const void *p) | 12 | static __always_inline u32 get_unaligned_le32(const void *p) |
| 13 | { | 13 | { |
| 14 | return le32_to_cpup((__le32 *)p); | 14 | return le32_to_cpup((__le32 *)p); |
| 15 | } | 15 | } |
| 16 | 16 | ||
| 17 | static inline u64 get_unaligned_le64(const void *p) | 17 | static __always_inline u64 get_unaligned_le64(const void *p) |
| 18 | { | 18 | { |
| 19 | return le64_to_cpup((__le64 *)p); | 19 | return le64_to_cpup((__le64 *)p); |
| 20 | } | 20 | } |
| 21 | 21 | ||
| 22 | static inline u16 get_unaligned_be16(const void *p) | 22 | static __always_inline u16 get_unaligned_be16(const void *p) |
| 23 | { | 23 | { |
| 24 | return be16_to_cpup((__be16 *)p); | 24 | return be16_to_cpup((__be16 *)p); |
| 25 | } | 25 | } |
| 26 | 26 | ||
| 27 | static inline u32 get_unaligned_be32(const void *p) | 27 | static __always_inline u32 get_unaligned_be32(const void *p) |
| 28 | { | 28 | { |
| 29 | return be32_to_cpup((__be32 *)p); | 29 | return be32_to_cpup((__be32 *)p); |
| 30 | } | 30 | } |
| 31 | 31 | ||
| 32 | static inline u64 get_unaligned_be64(const void *p) | 32 | static __always_inline u64 get_unaligned_be64(const void *p) |
| 33 | { | 33 | { |
| 34 | return be64_to_cpup((__be64 *)p); | 34 | return be64_to_cpup((__be64 *)p); |
| 35 | } | 35 | } |
| 36 | 36 | ||
| 37 | static inline void put_unaligned_le16(u16 val, void *p) | 37 | static __always_inline void put_unaligned_le16(u16 val, void *p) |
| 38 | { | 38 | { |
| 39 | *((__le16 *)p) = cpu_to_le16(val); | 39 | *((__le16 *)p) = cpu_to_le16(val); |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | static inline void put_unaligned_le32(u32 val, void *p) | 42 | static __always_inline void put_unaligned_le32(u32 val, void *p) |
| 43 | { | 43 | { |
| 44 | *((__le32 *)p) = cpu_to_le32(val); | 44 | *((__le32 *)p) = cpu_to_le32(val); |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | static inline void put_unaligned_le64(u64 val, void *p) | 47 | static __always_inline void put_unaligned_le64(u64 val, void *p) |
| 48 | { | 48 | { |
| 49 | *((__le64 *)p) = cpu_to_le64(val); | 49 | *((__le64 *)p) = cpu_to_le64(val); |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | static inline void put_unaligned_be16(u16 val, void *p) | 52 | static __always_inline void put_unaligned_be16(u16 val, void *p) |
| 53 | { | 53 | { |
| 54 | *((__be16 *)p) = cpu_to_be16(val); | 54 | *((__be16 *)p) = cpu_to_be16(val); |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static inline void put_unaligned_be32(u32 val, void *p) | 57 | static __always_inline void put_unaligned_be32(u32 val, void *p) |
| 58 | { | 58 | { |
| 59 | *((__be32 *)p) = cpu_to_be32(val); | 59 | *((__be32 *)p) = cpu_to_be32(val); |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | static inline void put_unaligned_be64(u64 val, void *p) | 62 | static __always_inline void put_unaligned_be64(u64 val, void *p) |
| 63 | { | 63 | { |
| 64 | *((__be64 *)p) = cpu_to_be64(val); | 64 | *((__be64 *)p) = cpu_to_be64(val); |
| 65 | } | 65 | } |
