| author | Zi Shen Lim <zlim.lnx@gmail.com> | 2014-12-03 03:38:01 -0500 |
|---|---|---|
| committer | Will Deacon <will.deacon@arm.com> | 2014-12-03 13:04:09 -0500 |
| commit | 51c9fbb1b146f3336a93d398c439b6fbfe5ab489 (patch) | |
| tree | 2928e70795aaf5fba8d0263ac06b9205d42364d7 /arch/arm64/net | |
| parent | e4f88d833bec29b8e6fadc1b2488f0c6370935e1 (diff) | |
arm64: bpf: lift restriction on last instruction
Earlier implementation assumed last instruction is BPF_EXIT.
Since this is no longer a restriction in eBPF, we remove this
limitation.
Per Alexei Starovoitov [1]:
> classic BPF has a restriction that last insn is always BPF_RET.
> eBPF doesn't have BPF_RET instruction and this restriction.
> It has BPF_EXIT insn which can appear anywhere in the program
> one or more times and it doesn't have to be last insn.
[1] https://lkml.org/lkml/2014/11/27/2
Fixes: e54bcde3d69d ("arm64: eBPF JIT compiler")
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64/net')
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 13
1 file changed, 8 insertions(+), 5 deletions(-)
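For context on the restriction being lifted, here is a small hypothetical eBPF program (not taken from this patch or from the kernel's test suite), written with the instruction macros from include/linux/filter.h, in which BPF_EXIT appears both in the middle of the program and as the last instruction; after this change the arm64 JIT handles either placement.

```c
#include <linux/filter.h>

/* Hypothetical program for illustration only: return 1 when r1 != 0,
 * otherwise return 2.  BPF_EXIT occurs mid-program and at the end.
 */
static const struct bpf_insn sample_prog[] = {
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),	/* if (r1 == 0) skip next 2 insns */
	BPF_MOV64_IMM(BPF_REG_0, 1),		/* r0 = 1 */
	BPF_EXIT_INSN(),			/* exit in the middle of the program */
	BPF_MOV64_IMM(BPF_REG_0, 2),		/* r0 = 2 */
	BPF_EXIT_INSN(),			/* exit as the final instruction */
};
```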
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 41f1e3e2ea24..edba042b2325 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -60,7 +60,7 @@ struct jit_ctx {
 	const struct bpf_prog *prog;
 	int idx;
 	int tmp_used;
-	int body_offset;
+	int epilogue_offset;
 	int *offset;
 	u32 *image;
 };
@@ -130,8 +130,8 @@ static void jit_fill_hole(void *area, unsigned int size)
 
 static inline int epilogue_offset(const struct jit_ctx *ctx)
 {
-	int to = ctx->offset[ctx->prog->len - 1];
-	int from = ctx->idx - ctx->body_offset;
+	int to = ctx->epilogue_offset;
+	int from = ctx->idx;
 
 	return to - from;
 }
@@ -463,6 +463,8 @@ emit_cond_jmp:
 	}
 	/* function return */
 	case BPF_JMP | BPF_EXIT:
+		/* Optimization: when last instruction is EXIT,
+		   simply fallthrough to epilogue. */
 		if (i == ctx->prog->len - 1)
 			break;
 		jmp_offset = epilogue_offset(ctx);
@@ -685,11 +687,13 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 
 	/* 1. Initial fake pass to compute ctx->idx. */
 
-	/* Fake pass to fill in ctx->offset. */
+	/* Fake pass to fill in ctx->offset and ctx->tmp_used. */
 	if (build_body(&ctx))
 		goto out;
 
 	build_prologue(&ctx);
+
+	ctx.epilogue_offset = ctx.idx;
 	build_epilogue(&ctx);
 
 	/* Now we know the actual image size. */
@@ -706,7 +710,6 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
 
 	build_prologue(&ctx);
 
-	ctx.body_offset = ctx.idx;
 	if (build_body(&ctx)) {
 		bpf_jit_binary_free(header);
 		goto out;
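Taken together, the hunks replace the old scheme, in which the epilogue branch target was derived from ctx->offset[] of the last eBPF instruction (valid only when that instruction is BPF_EXIT), with an explicit ctx->epilogue_offset recorded once the prologue and body have been sized. Below is a simplified sketch of the resulting flow; error handling, the second (emission) pass, and unrelated fields are omitted, build_prologue/build_body/build_epilogue are the existing JIT helpers, and the size_pass() wrapper is a hypothetical name standing in for the first half of bpf_int_jit_compile(), so treat this as an outline rather than a drop-in copy of bpf_jit_comp.c.

```c
/* Sketch only -- simplified restatement of the hunks above. */
struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;		/* current A64 instruction index */
	int epilogue_offset;	/* A64 index where the epilogue begins */
	/* ... offset[], image, etc. elided ... */
};

/* Forward branch distance, in A64 instructions, from the current
 * emission point to the start of the epilogue.
 */
static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	return ctx->epilogue_offset - ctx->idx;
}

/* Hypothetical wrapper for the sizing half of bpf_int_jit_compile(). */
static void size_pass(struct jit_ctx *ctx)
{
	if (build_body(ctx))		/* fake pass: fills ctx->offset[], sizes the body */
		return;
	build_prologue(ctx);		/* prologue has a fixed length */

	ctx->epilogue_offset = ctx->idx;	/* epilogue starts after prologue + body */
	build_epilogue(ctx);
	/* ctx->idx now holds the total image size in instructions */
}
```

With this bookkeeping, a BPF_EXIT emitted at A64 index 25 in an image whose epilogue starts at index 40 becomes a forward branch of 40 - 25 = 15 instructions, regardless of whether any eBPF instruction follows it; only a BPF_EXIT that really is the last instruction skips the branch and falls through into the epilogue.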