author	Eric Dumazet <eric.dumazet@gmail.com>	2012-01-18 02:21:42 -0500
committer	David S. Miller <davem@davemloft.net>	2012-01-18 16:04:26 -0500
commit	d00a9dd21bdf7908b70866794c8313ee8a5abd5c (patch)
tree	3c9c422ba64d1bb6ffdadeb71e02726126c870c6 /arch/x86/net
parent	1c659a4475ec2b2f4495e4773c417a9100cbd9de (diff)
net: bpf_jit: fix divide by 0 generation
Several problems fixed in this patch:

1) Target of the conditional jump in case a divide by 0 is performed
   by a bpf is wrong.

2) Must 'generate' the full function prologue/epilogue at pass=0, or
   else we can stop too early in pass=1 if the proglen doesn't change
   (if the increase of prologue/epilogue equals the decrease in total
   instruction length because some jumps are converted to near jumps).

3) Change the wrong length detection at the end of code generation to
   issue a more explicit message, no need for a full stack trace.

Reported-by: Phil Oester <kernel@linuxace.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
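A note on point 1: the bad jump target is only reachable when the filter contains an early "ret 0" (recorded by the JIT as pc_ret0) followed by a division by X. A hypothetical classic-BPF filter of that shape, sketched with the BPF_STMT/BPF_JUMP macros from linux/filter.h (the name and exact instructions are illustrative, not the reported filter):

#include <linux/filter.h>

static struct sock_filter div_x_prog[] = {
	BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 0, 0, 1),	/* if A > 0 goto ret0 */
	BPF_STMT(BPF_RET | BPF_K, 0),			/* ret0: pc_ret0 = 1  */
	BPF_STMT(BPF_LDX | BPF_W | BPF_LEN, 0),		/* X = skb->len       */
	BPF_STMT(BPF_ALU | BPF_DIV | BPF_X, 0),		/* A /= X             */
	BPF_STMT(BPF_RET | BPF_A, 0),
};

For such a program the JIT guards the div with "test %ebx,%ebx; je <ret0>", and before this patch the je displacement pointed just past the ret0 code instead of at it.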
Diffstat (limited to 'arch/x86/net')
-rw-r--r--	arch/x86/net/bpf_jit_comp.c | 36
1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 7b65f752c5f8..7c1b765ecc59 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *fp)
 	cleanup_addr = proglen; /* epilogue address */
 
 	for (pass = 0; pass < 10; pass++) {
+		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
 		/* no prologue/epilogue for trivial filters (RET something) */
 		proglen = 0;
 		prog = temp;
 
-		if (seen) {
+		if (seen_or_pass0) {
 			EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
 			EMIT4(0x48, 0x83, 0xec, 96);	/* subq  $96,%rsp	*/
 			/* note : must save %rbx in case bpf_error is hit */
-			if (seen & (SEEN_XREG | SEEN_DATAREF))
+			if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
 				EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
-			if (seen & SEEN_XREG)
+			if (seen_or_pass0 & SEEN_XREG)
 				CLEAR_X(); /* make sure we dont leek kernel memory */
 
 			/*
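The seen_or_pass0 variable implements point 2. The seen flags are only complete after the instruction loop has run once, so before this patch pass 0 sized the program without its prologue/epilogue; since the outer loop stops as soon as proglen stops changing, a prologue appearing in pass 1 could be exactly masked by jumps shrinking to their near forms. A toy userspace model of that coincidence (all byte counts invented except the 5- and 2-byte x86 jmp encodings):

#include <stdio.h>

int main(void)
{
	unsigned int body      = 40;	/* non-jump instructions, fixed size */
	unsigned int long_jmp  = 5;	/* jmp rel32                         */
	unsigned int short_jmp = 2;	/* jmp rel8, after shrinking         */
	unsigned int prologue  = 6;	/* emitted only once seen != 0       */

	unsigned int pass0 = body + 2 * long_jmp;		/* no prologue */
	unsigned int pass1 = prologue + body + 2 * short_jmp;

	/* 50 == 50: equal lengths but different layout, so stopping
	 * here would leave every addrs[] entry stale */
	printf("pass0=%u pass1=%u\n", pass0, pass1);
	return 0;
}

Assuming SEEN_XREG | SEEN_DATAREF | SEEN_MEM on pass 0 makes the first length an upper bound, so a repeated proglen really means a stable layout.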
@@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 			 * r9 = skb->len - skb->data_len
 			 * r8 = skb->data
 			 */
-			if (seen & SEEN_DATAREF) {
+			if (seen_or_pass0 & SEEN_DATAREF) {
 				if (offsetof(struct sk_buff, len) <= 127)
 					/* mov off8(%rdi),%r9d */
 					EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
@@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *fp)
 			case BPF_S_ALU_DIV_X: /* A /= X; */
 				seen |= SEEN_XREG;
 				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
-				if (pc_ret0 != -1)
-					EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
-				else {
+				if (pc_ret0 > 0) {
+					/* addrs[pc_ret0 - 1] is start address of target
+					 * (addrs[i] - 4) is the address following this jmp
+					 * ("xor %edx,%edx; div %ebx" being 4 bytes long)
+					 */
+					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
+								(addrs[i] - 4));
+				} else {
 					EMIT_COND_JMP(X86_JNE, 2 + 5);
 					CLEAR_A();
 					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
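This hunk is the heart of point 1. addrs[i] stores the offset of the first byte after instruction i, so the start of the ret0 block is addrs[pc_ret0 - 1]; the old addrs[pc_ret0] made the je land just past it. The test also tightens from != -1 to > 0: the start offset of instruction 0 (the prologue length) is not kept in addrs[], so a ret0 at index 0 now takes the fallback branch rather than reading addrs[-1]. The displacement arithmetic, with invented offsets:

#include <stdio.h>

int main(void)
{
	/* hypothetical end offsets of 4 BPF insns; insn 1 is "ret 0" */
	unsigned int addrs[] = { 4, 11, 14, 22 };
	int pc_ret0 = 1;	/* index of the ret-0 instruction */
	int i = 3;		/* index of the A /= X instruction */

	/* "xor %edx,%edx; div %ebx" are the last 4 bytes of insn i,
	 * and a je displacement is relative to the byte after the je */
	unsigned int next = addrs[i] - 4;

	printf("old: %d (past ret0), new: %d (on ret0)\n",
	       (int)(addrs[pc_ret0] - next),
	       (int)(addrs[pc_ret0 - 1] - next));
	return 0;
}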
@@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *fp)
 				}
 				/* fallinto */
 			case BPF_S_RET_A:
-				if (seen) {
+				if (seen_or_pass0) {
 					if (i != flen - 1) {
 						EMIT_JMP(cleanup_addr - addrs[i]);
 						break;
 					}
-					if (seen & SEEN_XREG)
+					if (seen_or_pass0 & SEEN_XREG)
 						EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov -8(%rbp),%rbx */
 					EMIT1(0xc9); 	/* leaveq */
 				}
@@ -483,8 +489,9 @@ common_load: seen |= SEEN_DATAREF;
 				goto common_load;
 			case BPF_S_LDX_B_MSH:
 				if ((int)K < 0) {
-					if (pc_ret0 != -1) {
-						EMIT_JMP(addrs[pc_ret0] - addrs[i]);
+					if (pc_ret0 > 0) {
+						/* addrs[pc_ret0 - 1] is the start address */
+						EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]);
 						break;
 					}
 					CLEAR_A();
@@ -599,13 +606,14 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 			 * use it to give the cleanup instruction(s) addr
 			 */
 			cleanup_addr = proglen - 1; /* ret */
-			if (seen)
+			if (seen_or_pass0)
 				cleanup_addr -= 1; /* leaveq */
-			if (seen & SEEN_XREG)
+			if (seen_or_pass0 & SEEN_XREG)
 				cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
 
 			if (image) {
-				WARN_ON(proglen != oldproglen);
+				if (proglen != oldproglen)
+					pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
 				break;
 			}
 			if (proglen == oldproglen) {
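Point 3 is this last hunk: once pass 0 is pessimistic, reaching the image-filling pass with a changed proglen should be impossible, so the full WARN_ON backtrace is demoted to one self-describing pr_err line. A condensed userspace model of the driver loop around this hunk (run_pass and the sizes are hypothetical stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* one fake codegen pass: far jumps shrink to near jumps until the
 * length stabilises at 50 bytes */
static unsigned int run_pass(unsigned char *image, unsigned int prev)
{
	unsigned int len = (prev - 3 > 50) ? prev - 3 : 50;

	if (image)
		memset(image, 0x90, len);	/* "emit" into the image */
	return len;
}

int main(void)
{
	unsigned int proglen = 0, oldproglen = 60;
	unsigned char *image = NULL;
	int pass;

	for (pass = 0; pass < 10; pass++) {
		proglen = run_pass(image, oldproglen);
		if (image) {
			if (proglen != oldproglen)	/* "cannot happen" */
				fprintf(stderr, "proglen=%u != oldproglen=%u\n",
					proglen, oldproglen);
			break;		/* image filled on the extra pass */
		}
		if (proglen == oldproglen) {	/* sizes converged */
			image = malloc(proglen);
			if (!image)
				break;
		}
		oldproglen = proglen;
	}
	printf("done after pass %d, proglen=%u\n", pass, proglen);
	free(image);
	return 0;
}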