aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/net
diff options
context:
space:
mode:
author    Alexei Starovoitov <ast@plumgrid.com> 2014-10-10 23:30:23 -0400
committer David S. Miller <davem@davemloft.net> 2014-10-14 13:13:14 -0400
commit    e0ee9c12157dc74e49e4731e0d07512e7d1ceb95 (patch)
tree      add38d7ee6a64ede3ed2e8ae26bb0f2a877b5c25 /arch/x86/net
parent    b2532eb9abd88384aa586169b54a3e53574f29f8 (diff)
x86: bpf_jit: fix two bugs in eBPF JIT compiler
1. The JIT compiler uses a multi-pass approach to converge on the final image size, since x86 instructions are variable length. It starts with large gaps between instructions (so some jumps may use imm32 instead of imm8) and iterates until the total program size is the same as in the previous pass. This algorithm works only if the program size is strictly decreasing. Programs that use the LD_ABS insn need additional code in the prologue, but it was not emitted during the 1st pass, so there was a chance that the 2nd pass would adjust imm32->imm8 jump offsets by the same number of bytes as the increase in the prologue, which could cause the algorithm to erroneously decide that the size had converged. Fix it by always emitting the largest prologue in the first pass, which is detected by the oldproglen==0 check. Also change the error check condition 'proglen != oldproglen' to fail gracefully. 2. While staring at the code, I realized that a 64-byte buffer may not be enough when the 1st insn is large, so increase it to 128 to avoid buffer overflow (the theoretical maximum size of prologue+div is 109) and add a runtime check. Fixes: 622582786c9e ("net: filter: x86: internal BPF JIT") Reported-by: Darrick J. Wong <darrick.wong@oracle.com> Signed-off-by: Alexei Starovoitov <ast@plumgrid.com> Tested-by: Darrick J. Wong <darrick.wong@oracle.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/x86/net')
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index d56cd1f515bd..3f627345d51c 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -182,12 +182,17 @@ struct jit_context {
182 bool seen_ld_abs; 182 bool seen_ld_abs;
183}; 183};
184 184
185/* maximum number of bytes emitted while JITing one eBPF insn */
186#define BPF_MAX_INSN_SIZE 128
187#define BPF_INSN_SAFETY 64
188
185static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, 189static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
186 int oldproglen, struct jit_context *ctx) 190 int oldproglen, struct jit_context *ctx)
187{ 191{
188 struct bpf_insn *insn = bpf_prog->insnsi; 192 struct bpf_insn *insn = bpf_prog->insnsi;
189 int insn_cnt = bpf_prog->len; 193 int insn_cnt = bpf_prog->len;
190 u8 temp[64]; 194 bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
195 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
191 int i; 196 int i;
192 int proglen = 0; 197 int proglen = 0;
193 u8 *prog = temp; 198 u8 *prog = temp;
@@ -225,7 +230,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
225 EMIT2(0x31, 0xc0); /* xor eax, eax */ 230 EMIT2(0x31, 0xc0); /* xor eax, eax */
226 EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */ 231 EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
227 232
228 if (ctx->seen_ld_abs) { 233 if (seen_ld_abs) {
229 /* r9d : skb->len - skb->data_len (headlen) 234 /* r9d : skb->len - skb->data_len (headlen)
230 * r10 : skb->data 235 * r10 : skb->data
231 */ 236 */
@@ -685,7 +690,7 @@ xadd: if (is_imm8(insn->off))
685 case BPF_JMP | BPF_CALL: 690 case BPF_JMP | BPF_CALL:
686 func = (u8 *) __bpf_call_base + imm32; 691 func = (u8 *) __bpf_call_base + imm32;
687 jmp_offset = func - (image + addrs[i]); 692 jmp_offset = func - (image + addrs[i]);
688 if (ctx->seen_ld_abs) { 693 if (seen_ld_abs) {
689 EMIT2(0x41, 0x52); /* push %r10 */ 694 EMIT2(0x41, 0x52); /* push %r10 */
690 EMIT2(0x41, 0x51); /* push %r9 */ 695 EMIT2(0x41, 0x51); /* push %r9 */
691 /* need to adjust jmp offset, since 696 /* need to adjust jmp offset, since
@@ -699,7 +704,7 @@ xadd: if (is_imm8(insn->off))
699 return -EINVAL; 704 return -EINVAL;
700 } 705 }
701 EMIT1_off32(0xE8, jmp_offset); 706 EMIT1_off32(0xE8, jmp_offset);
702 if (ctx->seen_ld_abs) { 707 if (seen_ld_abs) {
703 EMIT2(0x41, 0x59); /* pop %r9 */ 708 EMIT2(0x41, 0x59); /* pop %r9 */
704 EMIT2(0x41, 0x5A); /* pop %r10 */ 709 EMIT2(0x41, 0x5A); /* pop %r10 */
705 } 710 }
@@ -804,7 +809,8 @@ emit_jmp:
804 goto common_load; 809 goto common_load;
805 case BPF_LD | BPF_ABS | BPF_W: 810 case BPF_LD | BPF_ABS | BPF_W:
806 func = CHOOSE_LOAD_FUNC(imm32, sk_load_word); 811 func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
807common_load: ctx->seen_ld_abs = true; 812common_load:
813 ctx->seen_ld_abs = seen_ld_abs = true;
808 jmp_offset = func - (image + addrs[i]); 814 jmp_offset = func - (image + addrs[i]);
809 if (!func || !is_simm32(jmp_offset)) { 815 if (!func || !is_simm32(jmp_offset)) {
810 pr_err("unsupported bpf func %d addr %p image %p\n", 816 pr_err("unsupported bpf func %d addr %p image %p\n",
@@ -878,6 +884,11 @@ common_load: ctx->seen_ld_abs = true;
878 } 884 }
879 885
880 ilen = prog - temp; 886 ilen = prog - temp;
887 if (ilen > BPF_MAX_INSN_SIZE) {
888 pr_err("bpf_jit_compile fatal insn size error\n");
889 return -EFAULT;
890 }
891
881 if (image) { 892 if (image) {
882 if (unlikely(proglen + ilen > oldproglen)) { 893 if (unlikely(proglen + ilen > oldproglen)) {
883 pr_err("bpf_jit_compile fatal error\n"); 894 pr_err("bpf_jit_compile fatal error\n");
@@ -934,9 +945,11 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
934 goto out; 945 goto out;
935 } 946 }
936 if (image) { 947 if (image) {
937 if (proglen != oldproglen) 948 if (proglen != oldproglen) {
938 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", 949 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
939 proglen, oldproglen); 950 proglen, oldproglen);
951 goto out;
952 }
940 break; 953 break;
941 } 954 }
942 if (proglen == oldproglen) { 955 if (proglen == oldproglen) {