author	Nicolas Schichan <nschichan@freebox.fr>	2015-07-21 08:14:13 -0400
committer	David S. Miller <davem@davemloft.net>	2015-07-22 01:19:55 -0400
commit	6d715e301e950e3314d590bdbabf0c26e4fed94b (patch)
tree	71943addcee05f7947ab107b74467d7637303aab /arch
parent	7aed35cb65348fc8b9ce0c2394ff675e5fc750da (diff)
ARM: net: handle negative offsets in BPF JIT.
Previously, the JIT would reject negative offsets known during code
generation and mishandle negative offsets provided at runtime.

Fix that by calling bpf_internal_load_pointer_neg_helper() appropriately
in the jit_get_skb_{b,h,w} slow path helpers and by forcing the execution
flow to the slow path helpers when the offset is negative.

Signed-off-by: Nicolas Schichan <nschichan@freebox.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/net/bpf_jit_32.c	47
1 file changed, 38 insertions(+), 9 deletions(-)
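
For context (not part of the commit): classic BPF reserves large negative
load offsets as magic constants. SKF_NET_OFF and SKF_LL_OFF address bytes
relative to the network and link-layer headers, and
bpf_internal_load_pointer_neg_helper() is the common kernel routine that
resolves them. The sketch below is an illustrative user-space filter that
exercises exactly this path; attach_ip_proto_filter() is a hypothetical
name, not something from the patch.

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/filter.h>

/* Attach a classic BPF filter that reads the IPv4 protocol field via
 * the magic negative offset SKF_NET_OFF (network-header relative) and
 * accepts only TCP. */
int attach_ip_proto_filter(int fd)
{
	struct sock_filter insns[] = {
		/* A = byte at net header + 9 (IPv4 protocol field);
		 * K is negative here, the case this patch handles. */
		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_TCP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}

Before this patch, the ARM JIT bailed out with -ENOTSUPP on a
known-negative K like this (leaving the whole program to the
interpreter), and a negative offset computed at runtime by BPF_IND loads
was mishandled; with it, both cases funnel into the slow path helpers
added below.
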
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 21f5ace156fd..d9b25242f743 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -74,32 +74,52 @@ struct jit_ctx {
 
 int bpf_jit_enable __read_mostly;
 
-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+				  unsigned int size)
+{
+	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+
+	if (!ptr)
+		return -EFAULT;
+	memcpy(ret, ptr, size);
+	return 0;
+}
+
+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
 {
 	u8 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 1);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 1);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 1);
 
 	return (u64)err << 32 | ret;
 }
 
-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
 {
 	u16 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 2);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 2);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 2);
 
 	return (u64)err << 32 | ntohs(ret);
 }
 
-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
 {
 	u32 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 4);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 4);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 4);
 
 	return (u64)err << 32 | ntohl(ret);
 }
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
 		case BPF_LD | BPF_B | BPF_ABS:
 			load_order = 0;
 load:
-			/* the interpreter will deal with the negative K */
-			if ((int)k < 0)
-				return -ENOTSUPP;
 			emit_mov_i(r_off, k, ctx);
 load_common:
 			ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -553,6 +570,18 @@ load_common:
 				condt = ARM_COND_HI;
 			}
 
+			/*
+			 * Test for a negative offset, only if we are
+			 * currently scheduled to take the fast
+			 * path. This will update the flags so that
+			 * the slow path instructions are ignored if
+			 * the offset is negative.
+			 *
+			 * For load_order == 0 the HI condition will
+			 * make loads at offset 0 take the slow path too.
+			 */
+			_emit(condt, ARM_CMP_I(r_off, 0), ctx);
+
 			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
 			      ctx);
 
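
To make the generated-code change concrete, here is an
equivalent-control-flow sketch in C. This is an illustration, not the
JIT's output: the real result is straight-line ARM where the fast-path
instructions are predicated on the condition flags rather than branched
over, load_word_slow() is a stand-in for jit_get_skb_w(), and the exact
emitted bounds comparison is approximated by the
off + 4 <= skb_headlen(skb) test.

#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Stand-in for the jit_get_skb_w() slow path (error packing into the
 * upper 32 bits elided for brevity). */
static u32 load_word_slow(struct sk_buff *skb, int off)
{
	u32 ret = 0;

	if (off < 0) {
		void *p = bpf_internal_load_pointer_neg_helper(skb, off, 4);

		if (p)
			memcpy(&ret, p, 4);
	} else {
		skb_copy_bits(skb, off, &ret, 4);
	}
	return ntohl(ret);
}

static u32 jitted_load_word(struct sk_buff *skb, int off)
{
	/*
	 * Fast path only if the pre-existing in-bounds check passes
	 * AND, after the newly emitted conditional CMP, the offset is
	 * also non-negative.
	 */
	if (off >= 0 && off + 4 <= skb_headlen(skb))
		return ntohl(*(u32 *)(skb->data + off));

	/* Slow path: handles off < 0 via
	 * bpf_internal_load_pointer_neg_helper(). */
	return load_word_slow(skb, off);
}

Note the subtlety the new comment calls out: because the byte load
(load_order == 0) reuses the unsigned HI condition, an offset of exactly
0 also fails the check and takes the slow path, which costs a function
call but stays correct. Emitting the CMP under condt keeps the fast path
branch-free: the comparison itself only executes while the fast path is
still live.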