author		Joe Perches <joe@perches.com>	2014-12-04 20:01:24 -0500
committer	David S. Miller <davem@davemloft.net>	2014-12-09 14:56:41 -0500
commit		5cccc702fd54e5c3dc5ee16a129770aae79ae60b
tree		09803a622327d359d5b3c279b9886f5ce2809422 /arch/x86/net/bpf_jit_comp.c
parent		d148134be51fe05271ec8d47fe8c815bdee2b8e7
x86: bpf_jit_comp: Remove inline from static function definitions
Let the compiler decide instead.

No change in object size (x86-64, -O2, no profiling).

Signed-off-by: Joe Perches <joe@perches.com>
Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
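As background for the change, a minimal sketch of the pattern being relied on (not taken from the patch or from bpf_jit_comp.c; the helpers fits_in_byte() and clamp_to_byte() are hypothetical names): with -O2 the compiler may inline a small static function on its own, so the 'inline' keyword on such helpers is only a hint, which is why dropping it is expected to leave the generated object code unchanged.

#include <stdbool.h>

/* Hypothetical static helper, intentionally not marked 'inline'. */
static bool fits_in_byte(int value)
{
	return value <= 127 && value >= -128;
}

int clamp_to_byte(int value)
{
	/* At -O2 the compiler will typically inline fits_in_byte() here
	 * by itself; adding 'inline' above would not change the code it
	 * generates. */
	if (fits_in_byte(value))
		return value;
	return value < 0 ? -128 : 127;
}

The same reasoning applies to each of the small static helpers touched by this patch.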
Diffstat (limited to 'arch/x86/net/bpf_jit_comp.c')
-rw-r--r--	arch/x86/net/bpf_jit_comp.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 09e2ceaf13cb..626e01377a01 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -24,7 +24,7 @@ extern u8 sk_load_byte_positive_offset[];
 extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
 extern u8 sk_load_byte_negative_offset[];
 
-static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
+static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 {
 	if (len == 1)
 		*ptr = bytes;
@@ -52,12 +52,12 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 #define EMIT4_off32(b1, b2, b3, b4, off) \
 	do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
 
-static inline bool is_imm8(int value)
+static bool is_imm8(int value)
 {
 	return value <= 127 && value >= -128;
 }
 
-static inline bool is_simm32(s64 value)
+static bool is_simm32(s64 value)
 {
 	return value == (s64) (s32) value;
 }
@@ -94,7 +94,7 @@ static int bpf_size_to_x86_bytes(int bpf_size)
 #define X86_JGE 0x7D
 #define X86_JG  0x7F
 
-static inline void bpf_flush_icache(void *start, void *end)
+static void bpf_flush_icache(void *start, void *end)
 {
 	mm_segment_t old_fs = get_fs();
 
@@ -133,7 +133,7 @@ static const int reg2hex[] = {
  * which need extra byte of encoding.
  * rax,rcx,...,rbp have simpler encoding
  */
-static inline bool is_ereg(u32 reg)
+static bool is_ereg(u32 reg)
 {
 	return (1 << reg) & (BIT(BPF_REG_5) |
 			     BIT(AUX_REG) |
@@ -143,14 +143,14 @@ static inline bool is_ereg(u32 reg)
 }
 
 /* add modifiers if 'reg' maps to x64 registers r8..r15 */
-static inline u8 add_1mod(u8 byte, u32 reg)
+static u8 add_1mod(u8 byte, u32 reg)
 {
 	if (is_ereg(reg))
 		byte |= 1;
 	return byte;
 }
 
-static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
+static u8 add_2mod(u8 byte, u32 r1, u32 r2)
 {
 	if (is_ereg(r1))
 		byte |= 1;
@@ -160,13 +160,13 @@ static inline u8 add_2mod(u8 byte, u32 r1, u32 r2)
 }
 
 /* encode 'dst_reg' register into x64 opcode 'byte' */
-static inline u8 add_1reg(u8 byte, u32 dst_reg)
+static u8 add_1reg(u8 byte, u32 dst_reg)
 {
 	return byte + reg2hex[dst_reg];
 }
 
 /* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
-static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
+static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
 {
 	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
 }