author	Nicolas Schichan <nschichan@freebox.fr>	2015-05-07 11:14:21 -0400
committer	David S. Miller <davem@davemloft.net>	2015-05-10 19:21:49 -0400
commit	0b59d8806a31bb0267b3a461e8fef20c727bdbf6 (patch)
tree	b10da6cabb48be6b718189c7217e24fecb051b4a /arch
parent	19fc99d0c6ba7d9b65456496b5bb2169d5f74cd0 (diff)
ARM: net: delegate filter to kernel interpreter when imm_offset() return value can't fit into 12bits.
The ARM JIT code emits "ldr rX, [pc, #offset]" to access the literal
pool. The maximum value of #offset is 4095, and if the generated code
is too large, the #offset value can overflow and no longer point to
the expected slot in the literal pool. Additionally, when the overflow
occurs, bits of the overflow can end up changing the destination
register of the "ldr" instruction.

Fix that by detecting the overflow in imm_offset() and setting a flag
that is checked for each BPF instruction converted in build_body().
As of now the overflow can only be detected in the second pass. As a
result the second build_body() call can now fail, so add the
corresponding cleanup code for that case.

Using multiple literal pools in the JITed code would require lots of
intrusive changes to the JIT code (which would better be done as a
feature instead of a fix); just delegating to the kernel BPF
interpreter in that case is a more straightforward, minimal fix that
is easy to backport.

Fixes: ddecdfcea0ae ("ARM: 7259/3: net: JIT compiler for packet filters")
Signed-off-by: Nicolas Schichan <nschichan@freebox.fr>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
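To see why an out-of-range offset can also change the destination register: in the A32 LDR (immediate) encoding, the 12-bit immediate occupies bits [11:0] and the destination register Rd sits directly above it in bits [15:12], so OR-ing in an offset larger than 4095 spills into the Rd field. The stand-alone C sketch below only illustrates that effect; encode_ldr_pc() is a hypothetical helper written for this example and is not part of the kernel's JIT:

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only: build the A32 encoding of "ldr rd, [pc, #offset]".
 * Bits [11:0] hold the immediate and bits [15:12] hold Rd, so an
 * offset that needs more than 12 bits corrupts the destination
 * register.
 */
static uint32_t encode_ldr_pc(unsigned int rd, uint32_t offset)
{
	/* cond=AL, single data transfer, P=1 U=1 L=1, Rn=pc (r15) */
	return 0xe59f0000 | (rd << 12) | offset;
}

int main(void)
{
	/* In range: prints e59f2ffc == ldr r2, [pc, #4092] */
	printf("%08x\n", encode_ldr_pc(2, 0xffc));
	/* Out of range: prints e59f3000 == ldr r3, [pc, #0] -- Rd changed */
	printf("%08x\n", encode_ldr_pc(2, 0x1000));
	return 0;
}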
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/net/bpf_jit_32.c	27
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index f412b53ed268..e0e23582c8b4 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -54,6 +54,7 @@
 #define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))
 
 #define FLAG_NEED_X_RESET	(1 << 0)
+#define FLAG_IMM_OVERFLOW	(1 << 1)
 
 struct jit_ctx {
 	const struct bpf_prog *skf;
@@ -293,6 +294,15 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
 	/* PC in ARM mode == address of the instruction + 8 */
 	imm = offset - (8 + ctx->idx * 4);
 
+	if (imm & ~0xfff) {
+		/*
+		 * literal pool is too far, signal it into flags. we
+		 * can only detect it on the second pass unfortunately.
+		 */
+		ctx->flags |= FLAG_IMM_OVERFLOW;
+		return 0;
+	}
+
 	return imm;
 }
 
@@ -866,6 +876,14 @@ b_epilogue:
 		default:
 			return -1;
 		}
+
+		if (ctx->flags & FLAG_IMM_OVERFLOW)
+			/*
+			 * this instruction generated an overflow when
+			 * trying to access the literal pool, so
+			 * delegate this filter to the kernel interpreter.
+			 */
+			return -1;
 	}
 
 	/* compute offsets only during the first pass */
@@ -928,7 +946,14 @@ void bpf_jit_compile(struct bpf_prog *fp)
 	ctx.idx = 0;
 
 	build_prologue(&ctx);
-	build_body(&ctx);
+	if (build_body(&ctx) < 0) {
+#if __LINUX_ARM_ARCH__ < 7
+		if (ctx.imm_count)
+			kfree(ctx.imms);
+#endif
+		bpf_jit_binary_free(header);
+		goto out;
+	}
 	build_epilogue(&ctx);
 
 	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
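When build_body() fails, bpf_jit_compile() frees the partially built image and returns without installing a JITed function, so the socket filter keeps running through the kernel BPF interpreter, which is exactly the delegation the commit message describes. The stand-alone sketch below models that flag-and-bail pattern in simplified form; the toy_* names and struct toy_ctx are invented for this example and are not kernel symbols, only FLAG_IMM_OVERFLOW and the "imm & ~0xfff" test come from the patch itself:

#include <stdbool.h>
#include <stdint.h>

#define FLAG_IMM_OVERFLOW	(1 << 1)	/* same bit the patch adds */

/* Toy stand-in for struct jit_ctx. */
struct toy_ctx {
	uint32_t flags;
};

/* Mirrors the imm_offset() change: flag offsets that need > 12 bits. */
static uint16_t toy_imm_offset(int32_t imm, struct toy_ctx *ctx)
{
	if (imm & ~0xfff) {
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;	/* value is unused, the caller bails out */
	}
	return (uint16_t)imm;
}

/* Mirrors the build_body() change: fail as soon as the flag is seen. */
static int toy_build_body(struct toy_ctx *ctx, const int32_t *offs, int n)
{
	for (int i = 0; i < n; i++) {
		(void)toy_imm_offset(offs[i], ctx);
		if (ctx->flags & FLAG_IMM_OVERFLOW)
			return -1;	/* delegate to the interpreter */
	}
	return 0;
}

Checking the flag once per converted instruction keeps the change minimal compared with teaching the JIT to emit multiple literal pools, which matches the commit's stated goal of an easily backportable fix.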