path: root/arch/powerpc/net
author		Jan Seiffert <kaffeemonster@googlemail.com>	2012-04-29 15:02:19 -0400
committer	David S. Miller <davem@davemloft.net>	2012-04-30 13:40:50 -0400
commit		05be18241e83d2ac6b656c8f924e74b3998c173f (patch)
tree		4f0637e3fb5116fa259130b75a5dfdcb3e3335e7 /arch/powerpc/net
parent		518fbf9cdf17875d808596afd77fc115a6f942ca (diff)
bpf jit: Let the powerpc jit handle negative offsets
Now that the helper function from filter.c for negative offsets is exported, it can be used in the jit to handle negative offsets.

First modify the asm load helper functions to handle:
- known positive offsets
- known negative offsets
- any offset

then the compiler can be modified to explicitly use these helpers when appropriate.

This fixes the case of a negative X register and allows lifting the restriction that bpf programs with negative offsets can't be jited.

Tested-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Jan Seiffert <kaffeemonster@googlemail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
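For the ABS load instructions the offset K is a compile-time constant, so the JIT can pick the right entry point while generating code; only the IND loads (whose offset depends on X) need the generic entry that re-checks the sign at run time. A minimal C sketch of that selection, mirroring the CHOOSE_LOAD_FUNC macro added to bpf_jit_comp.c below (choose_load_func is an illustrative name; SKF_LL_OFF is the lowest valid magic offset from <linux/filter.h>):

	/* Sketch only: which of the three asm entry points the generated
	 * code will call for a constant offset k.
	 */
	static u8 *choose_load_func(int k, u8 *any, u8 *pos, u8 *neg)
	{
		if (k >= 0)
			return pos;	/* known positive offset */
		if (k >= SKF_LL_OFF)
			return neg;	/* known SKF_NET_OFF/SKF_LL_OFF range */
		return any;		/* below SKF_LL_OFF: generic entry,
					 * whose runtime check ends up returning 0 */
	}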
Diffstat (limited to 'arch/powerpc/net')
-rw-r--r--	arch/powerpc/net/bpf_jit.h	|   8
-rw-r--r--	arch/powerpc/net/bpf_jit_64.S	| 108
-rw-r--r--	arch/powerpc/net/bpf_jit_comp.c	|  26
3 files changed, 111 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index af1ab5e9a691..5c3cf2d04e41 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -48,7 +48,13 @@
 /*
  * Assembly helpers from arch/powerpc/net/bpf_jit.S:
  */
-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+#define DECLARE_LOAD_FUNC(func)	\
+	extern u8 func[], func##_negative_offset[], func##_positive_offset[]
+
+DECLARE_LOAD_FUNC(sk_load_word);
+DECLARE_LOAD_FUNC(sk_load_half);
+DECLARE_LOAD_FUNC(sk_load_byte);
+DECLARE_LOAD_FUNC(sk_load_byte_msh);

 #define FUNCTION_DESCR_SIZE	24

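Each DECLARE_LOAD_FUNC() invocation above expands to three extern declarations, one per asm entry point; the first one, for example, becomes:

	extern u8 sk_load_word[], sk_load_word_negative_offset[],
		  sk_load_word_positive_offset[];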
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index ff4506e85cce..55ba3855a97f 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -31,14 +31,13 @@
  * then branch directly to slow_path_XXX if required. (In fact, could
  * load a spare GPR with the address of slow_path_generic and pass size
  * as an argument, making the call site a mtlr, li and bllr.)
- *
- * Technically, the "is addr < 0" check is unnecessary & slowing down
- * the ABS path, as it's statically checked on generation.
  */
 	.globl	sk_load_word
 sk_load_word:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_word_neg
+	.globl	sk_load_word_positive_offset
+sk_load_word_positive_offset:
 	/* Are we accessing past headlen? */
 	subi	r_scratch1, r_HL, 4
 	cmpd	r_scratch1, r_addr
@@ -51,7 +50,9 @@ sk_load_word:
 	.globl	sk_load_half
 sk_load_half:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_half_neg
+	.globl	sk_load_half_positive_offset
+sk_load_half_positive_offset:
 	subi	r_scratch1, r_HL, 2
 	cmpd	r_scratch1, r_addr
 	blt	bpf_slow_path_half
@@ -61,7 +62,9 @@ sk_load_half:
 	.globl	sk_load_byte
 sk_load_byte:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_byte_neg
+	.globl	sk_load_byte_positive_offset
+sk_load_byte_positive_offset:
 	cmpd	r_HL, r_addr
 	ble	bpf_slow_path_byte
 	lbzx	r_A, r_D, r_addr
@@ -69,22 +72,20 @@ sk_load_byte:

 /*
  * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
- * r_addr is the offset value, already known positive
+ * r_addr is the offset value
  */
 	.globl sk_load_byte_msh
 sk_load_byte_msh:
+	cmpdi	r_addr, 0
+	blt	bpf_slow_path_byte_msh_neg
+	.globl sk_load_byte_msh_positive_offset
+sk_load_byte_msh_positive_offset:
 	cmpd	r_HL, r_addr
 	ble	bpf_slow_path_byte_msh
 	lbzx	r_X, r_D, r_addr
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
 	blr

-bpf_error:
-	/* Entered with cr0 = lt */
-	li	r3, 0
-	/* Generated code will 'blt epilogue', returning 0. */
-	blr
-
 /* Call out to skb_copy_bits:
  * We'll need to back up our volatile regs first; we have
  * local variable space at r1+(BPF_PPC_STACK_BASIC).
@@ -136,3 +137,84 @@ bpf_slow_path_byte_msh:
 	lbz	r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
 	blr
+
+/* Call out to bpf_internal_load_pointer_neg_helper:
+ * We'll need to back up our volatile regs first; we have
+ * local variable space at r1+(BPF_PPC_STACK_BASIC).
+ * Allocate a new stack frame here to remain ABI-compliant in
+ * stashing LR.
+ */
+#define sk_negative_common(SIZE)				\
+	mflr	r0;						\
+	std	r0, 16(r1);					\
+	/* R3 goes in parameter space of caller's frame */	\
+	std	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	std	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
+	std	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	stdu	r1, -BPF_PPC_SLOWPATH_FRAME(r1);		\
+	/* R3 = r_skb, as passed */				\
+	mr	r4, r_addr;					\
+	li	r5, SIZE;					\
+	bl	bpf_internal_load_pointer_neg_helper;		\
+	/* R3 != 0 on success */				\
+	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
+	ld	r0, 16(r1);					\
+	ld	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
+	ld	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	mtlr	r0;						\
+	cmpldi	r3, 0;						\
+	beq	bpf_error_slow;	/* cr0 = EQ */			\
+	mr	r_addr, r3;					\
+	ld	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	/* Great success! */
+
+bpf_slow_path_word_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_word_negative_offset
+sk_load_word_negative_offset:
+	sk_negative_common(4)
+	lwz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_half_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_half_negative_offset
+sk_load_half_negative_offset:
+	sk_negative_common(2)
+	lhz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_byte_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_byte_negative_offset
+sk_load_byte_negative_offset:
+	sk_negative_common(1)
+	lbz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_byte_msh_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_byte_msh_negative_offset
+sk_load_byte_msh_negative_offset:
+	sk_negative_common(1)
+	lbz	r_X, 0(r_addr)
+	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
+	blr
+
+bpf_error_slow:
+	/* fabricate a cr0 = lt */
+	li	r_scratch1, -1
+	cmpdi	r_scratch1, 0
+bpf_error:
+	/* Entered with cr0 = lt */
+	li	r3, 0
+	/* Generated code will 'blt epilogue', returning 0. */
+	blr
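All four negative-offset paths above share one shape: the bpf_slow_path_*_neg stub (entered from the generic entry when the offset turns out negative at run time) rejects anything below SKF_LL_OFF, then sk_load_*_negative_offset calls out to the C helper and loads from the pointer it returns. Roughly, in C, for the word case (a sketch only, assuming bpf_internal_load_pointer_neg_helper(skb, k, size) from net/core/filter.c returns NULL on failure; load_word_neg is a made-up name, and the real code keeps A in a fixed register and saves volatiles via sk_negative_common):

	#include <linux/filter.h>
	#include <linux/skbuff.h>

	static bool load_word_neg(struct sk_buff *skb, int k, u32 *A)
	{
		void *p;

		if (k < SKF_LL_OFF)	/* below the lowest magic offset */
			return false;	/* generated code branches to the epilogue, filter returns 0 */
		p = bpf_internal_load_pointer_neg_helper(skb, k, 4);
		if (!p)			/* requested region not available */
			return false;
		*A = *(u32 *)p;		/* lwz: native big-endian load on ppc64 */
		return true;
	}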
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 73619d3aeb6c..2dc8b1484845 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -127,6 +127,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	PPC_BLR();
 }

+#define CHOOSE_LOAD_FUNC(K, func) \
+	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+
 /* Assemble the body code between the prologue & epilogue. */
 static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			      struct codegen_context *ctx,
@@ -391,21 +394,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,

 		/*** Absolute loads from packet header/data ***/
 		case BPF_S_LD_W_ABS:
-			func = sk_load_word;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
 			goto common_load;
 		case BPF_S_LD_H_ABS:
-			func = sk_load_half;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
 			goto common_load;
 		case BPF_S_LD_B_ABS:
-			func = sk_load_byte;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
 		common_load:
-			/*
-			 * Load from [K]. Reference with the (negative)
-			 * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
-			 */
+			/* Load from [K]. */
 			ctx->seen |= SEEN_DATAREF;
-			if ((int)K < 0)
-				return -ENOTSUPP;
 			PPC_LI64(r_scratch1, func);
 			PPC_MTLR(r_scratch1);
 			PPC_LI32(r_addr, K);
@@ -429,7 +427,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 		common_load_ind:
 			/*
 			 * Load from [X + K]. Negative offsets are tested for
-			 * in the helper functions, and result in a 'ret 0'.
+			 * in the helper functions.
 			 */
 			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
 			PPC_LI64(r_scratch1, func);
@@ -443,13 +441,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 			break;

 		case BPF_S_LDX_B_MSH:
-			/*
-			 * x86 version drops packet (RET 0) when K<0, whereas
-			 * interpreter does allow K<0 (__load_pointer, special
-			 * ancillary data). common_load returns ENOTSUPP if K<0,
-			 * so we fall back to interpreter & filter works.
-			 */
-			func = sk_load_byte_msh;
+			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
 			goto common_load;
 			break;

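As a quick illustration of how CHOOSE_LOAD_FUNC resolves for representative constants (SKF_NET_OFF and SKF_LL_OFF are the negative magic offsets from <linux/filter.h>, -0x100000 and -0x200000 respectively):

	/* Illustrative only: which asm entry the JIT emits a call to. */
	CHOOSE_LOAD_FUNC(14, sk_load_half);              /* K >= 0             -> sk_load_half_positive_offset */
	CHOOSE_LOAD_FUNC(SKF_NET_OFF + 9, sk_load_byte); /* SKF_LL_OFF <= K < 0 -> sk_load_byte_negative_offset */
	CHOOSE_LOAD_FUNC(SKF_LL_OFF - 4, sk_load_word);  /* K < SKF_LL_OFF      -> sk_load_word (generic entry;
	                                                    the runtime check fails and the filter returns 0)  */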