about summary refs log tree commit diff stats
path: root/arch/x86/net
diff options
context:
space:
mode:
authorJan Seiffert <kaffeemonster@googlemail.com>2012-03-30 01:24:05 -0400
committerDavid S. Miller <davem@davemloft.net>2012-04-03 18:01:41 -0400
commita998d4342337c82dacdc0897d30a9364de1576a1 (patch)
tree301294142ae65e4646281329737fa349190d7565 /arch/x86/net
parentf03fb3f455c6c3a3dfcef6c7f2dcab104c813f4b (diff)
bpf jit: Let the x86 jit handle negative offsets
Now the helper function from filter.c for negative offsets is exported, it can be used in the jit to handle negative offsets. First modify the asm load helper functions to handle: - known positive offsets - known negative offsets - any offset then the compiler can be modified to explicitly use these helpers when appropriate. This fixes the case of a negative X register and allows to lift the restriction that bpf programs with negative offsets can't be jited. Signed-off-by: Jan Seiffert <kaffeemonster@googlemail.com> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/x86/net')
-rw-r--r--arch/x86/net/bpf_jit.S122
-rw-r--r--arch/x86/net/bpf_jit_comp.c41
2 files changed, 115 insertions, 48 deletions
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 66870223f8c5..877b9a1b2152 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -18,17 +18,17 @@
18 * r9d : hlen = skb->len - skb->data_len 18 * r9d : hlen = skb->len - skb->data_len
19 */ 19 */
20#define SKBDATA %r8 20#define SKBDATA %r8
21 21#define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */
22sk_load_word_ind:
23 .globl sk_load_word_ind
24
25 add %ebx,%esi /* offset += X */
26# test %esi,%esi /* if (offset < 0) goto bpf_error; */
27 js bpf_error
28 22
29sk_load_word: 23sk_load_word:
30 .globl sk_load_word 24 .globl sk_load_word
31 25
26 test %esi,%esi
27 js bpf_slow_path_word_neg
28
29sk_load_word_positive_offset:
30 .globl sk_load_word_positive_offset
31
32 mov %r9d,%eax # hlen 32 mov %r9d,%eax # hlen
33 sub %esi,%eax # hlen - offset 33 sub %esi,%eax # hlen - offset
34 cmp $3,%eax 34 cmp $3,%eax
@@ -37,16 +37,15 @@ sk_load_word:
37 bswap %eax /* ntohl() */ 37 bswap %eax /* ntohl() */
38 ret 38 ret
39 39
40
41sk_load_half_ind:
42 .globl sk_load_half_ind
43
44 add %ebx,%esi /* offset += X */
45 js bpf_error
46
47sk_load_half: 40sk_load_half:
48 .globl sk_load_half 41 .globl sk_load_half
49 42
43 test %esi,%esi
44 js bpf_slow_path_half_neg
45
46sk_load_half_positive_offset:
47 .globl sk_load_half_positive_offset
48
50 mov %r9d,%eax 49 mov %r9d,%eax
51 sub %esi,%eax # hlen - offset 50 sub %esi,%eax # hlen - offset
52 cmp $1,%eax 51 cmp $1,%eax
@@ -55,14 +54,15 @@ sk_load_half:
55 rol $8,%ax # ntohs() 54 rol $8,%ax # ntohs()
56 ret 55 ret
57 56
58sk_load_byte_ind:
59 .globl sk_load_byte_ind
60 add %ebx,%esi /* offset += X */
61 js bpf_error
62
63sk_load_byte: 57sk_load_byte:
64 .globl sk_load_byte 58 .globl sk_load_byte
65 59
60 test %esi,%esi
61 js bpf_slow_path_byte_neg
62
63sk_load_byte_positive_offset:
64 .globl sk_load_byte_positive_offset
65
66 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */ 66 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
67 jle bpf_slow_path_byte 67 jle bpf_slow_path_byte
68 movzbl (SKBDATA,%rsi),%eax 68 movzbl (SKBDATA,%rsi),%eax
@@ -73,25 +73,21 @@ sk_load_byte:
73 * 73 *
74 * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf) 74 * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf)
75 * Must preserve A accumulator (%eax) 75 * Must preserve A accumulator (%eax)
76 * Inputs : %esi is the offset value, already known positive 76 * Inputs : %esi is the offset value
77 */ 77 */
78ENTRY(sk_load_byte_msh) 78sk_load_byte_msh:
79 CFI_STARTPROC 79 .globl sk_load_byte_msh
80 test %esi,%esi
81 js bpf_slow_path_byte_msh_neg
82
83sk_load_byte_msh_positive_offset:
84 .globl sk_load_byte_msh_positive_offset
80 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */ 85 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */
81 jle bpf_slow_path_byte_msh 86 jle bpf_slow_path_byte_msh
82 movzbl (SKBDATA,%rsi),%ebx 87 movzbl (SKBDATA,%rsi),%ebx
83 and $15,%bl 88 and $15,%bl
84 shl $2,%bl 89 shl $2,%bl
85 ret 90 ret
86 CFI_ENDPROC
87ENDPROC(sk_load_byte_msh)
88
89bpf_error:
90# force a return 0 from jit handler
91 xor %eax,%eax
92 mov -8(%rbp),%rbx
93 leaveq
94 ret
95 91
96/* rsi contains offset and can be scratched */ 92/* rsi contains offset and can be scratched */
97#define bpf_slow_path_common(LEN) \ 93#define bpf_slow_path_common(LEN) \
@@ -138,3 +134,67 @@ bpf_slow_path_byte_msh:
138 shl $2,%al 134 shl $2,%al
139 xchg %eax,%ebx 135 xchg %eax,%ebx
140 ret 136 ret
137
138#define sk_negative_common(SIZE) \
139 push %rdi; /* save skb */ \
140 push %r9; \
141 push SKBDATA; \
142/* rsi already has offset */ \
143 mov $SIZE,%ecx; /* size */ \
144 call bpf_internal_load_pointer_neg_helper; \
145 test %rax,%rax; \
146 pop SKBDATA; \
147 pop %r9; \
148 pop %rdi; \
149 jz bpf_error
150
151
152bpf_slow_path_word_neg:
153 cmp SKF_MAX_NEG_OFF, %esi /* test range */
154 jl bpf_error /* offset lower -> error */
155sk_load_word_negative_offset:
156 .globl sk_load_word_negative_offset
157 sk_negative_common(4)
158 mov (%rax), %eax
159 bswap %eax
160 ret
161
162bpf_slow_path_half_neg:
163 cmp SKF_MAX_NEG_OFF, %esi
164 jl bpf_error
165sk_load_half_negative_offset:
166 .globl sk_load_half_negative_offset
167 sk_negative_common(2)
168 mov (%rax),%ax
169 rol $8,%ax
170 movzwl %ax,%eax
171 ret
172
173bpf_slow_path_byte_neg:
174 cmp SKF_MAX_NEG_OFF, %esi
175 jl bpf_error
176sk_load_byte_negative_offset:
177 .globl sk_load_byte_negative_offset
178 sk_negative_common(1)
179 movzbl (%rax), %eax
180 ret
181
182bpf_slow_path_byte_msh_neg:
183 cmp SKF_MAX_NEG_OFF, %esi
184 jl bpf_error
185sk_load_byte_msh_negative_offset:
186 .globl sk_load_byte_msh_negative_offset
187 xchg %eax,%ebx /* dont lose A , X is about to be scratched */
188 sk_negative_common(1)
189 movzbl (%rax),%eax
190 and $15,%al
191 shl $2,%al
192 xchg %eax,%ebx
193 ret
194
195bpf_error:
196# force a return 0 from jit handler
197 xor %eax,%eax
198 mov -8(%rbp),%rbx
199 leaveq
200 ret
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 5a5b6e4dd738..0597f95b6da6 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -30,7 +30,10 @@ int bpf_jit_enable __read_mostly;
30 * assembly code in arch/x86/net/bpf_jit.S 30 * assembly code in arch/x86/net/bpf_jit.S
31 */ 31 */
32extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; 32extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
33extern u8 sk_load_word_ind[], sk_load_half_ind[], sk_load_byte_ind[]; 33extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
34extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
35extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
36extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];
34 37
35static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) 38static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
36{ 39{
@@ -117,6 +120,8 @@ static inline void bpf_flush_icache(void *start, void *end)
117 set_fs(old_fs); 120 set_fs(old_fs);
118} 121}
119 122
123#define CHOOSE_LOAD_FUNC(K, func) \
124 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
120 125
121void bpf_jit_compile(struct sk_filter *fp) 126void bpf_jit_compile(struct sk_filter *fp)
122{ 127{
@@ -473,44 +478,46 @@ void bpf_jit_compile(struct sk_filter *fp)
473#endif 478#endif
474 break; 479 break;
475 case BPF_S_LD_W_ABS: 480 case BPF_S_LD_W_ABS:
476 func = sk_load_word; 481 func = CHOOSE_LOAD_FUNC(K, sk_load_word);
477common_load: seen |= SEEN_DATAREF; 482common_load: seen |= SEEN_DATAREF;
478 if ((int)K < 0) {
479 /* Abort the JIT because __load_pointer() is needed. */
480 goto out;
481 }
482 t_offset = func - (image + addrs[i]); 483 t_offset = func - (image + addrs[i]);
483 EMIT1_off32(0xbe, K); /* mov imm32,%esi */ 484 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
484 EMIT1_off32(0xe8, t_offset); /* call */ 485 EMIT1_off32(0xe8, t_offset); /* call */
485 break; 486 break;
486 case BPF_S_LD_H_ABS: 487 case BPF_S_LD_H_ABS:
487 func = sk_load_half; 488 func = CHOOSE_LOAD_FUNC(K, sk_load_half);
488 goto common_load; 489 goto common_load;
489 case BPF_S_LD_B_ABS: 490 case BPF_S_LD_B_ABS:
490 func = sk_load_byte; 491 func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
491 goto common_load; 492 goto common_load;
492 case BPF_S_LDX_B_MSH: 493 case BPF_S_LDX_B_MSH:
493 if ((int)K < 0) { 494 func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
494 /* Abort the JIT because __load_pointer() is needed. */
495 goto out;
496 }
497 seen |= SEEN_DATAREF | SEEN_XREG; 495 seen |= SEEN_DATAREF | SEEN_XREG;
498 t_offset = sk_load_byte_msh - (image + addrs[i]); 496 t_offset = func - (image + addrs[i]);
499 EMIT1_off32(0xbe, K); /* mov imm32,%esi */ 497 EMIT1_off32(0xbe, K); /* mov imm32,%esi */
500 EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */ 498 EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
501 break; 499 break;
502 case BPF_S_LD_W_IND: 500 case BPF_S_LD_W_IND:
503 func = sk_load_word_ind; 501 func = sk_load_word;
504common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; 502common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
505 t_offset = func - (image + addrs[i]); 503 t_offset = func - (image + addrs[i]);
506 EMIT1_off32(0xbe, K); /* mov imm32,%esi */ 504 if (K) {
505 if (is_imm8(K)) {
506 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
507 } else {
508 EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
509 EMIT(K, 4);
510 }
511 } else {
512 EMIT2(0x89,0xde); /* mov %ebx,%esi */
513 }
507 EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */ 514 EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */
508 break; 515 break;
509 case BPF_S_LD_H_IND: 516 case BPF_S_LD_H_IND:
510 func = sk_load_half_ind; 517 func = sk_load_half;
511 goto common_load_ind; 518 goto common_load_ind;
512 case BPF_S_LD_B_IND: 519 case BPF_S_LD_B_IND:
513 func = sk_load_byte_ind; 520 func = sk_load_byte;
514 goto common_load_ind; 521 goto common_load_ind;
515 case BPF_S_JMP_JA: 522 case BPF_S_JMP_JA:
516 t_offset = addrs[i + K] - addrs[i]; 523 t_offset = addrs[i + K] - addrs[i];