Diffstat (limited to 'arch/powerpc/net/bpf_jit_comp.c')
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c  32
1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index d1916b577f2c..8b2926850125 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -1,8 +1,9 @@
-/* bpf_jit_comp.c: BPF JIT compiler for PPC64
+/* bpf_jit_comp.c: BPF JIT compiler
  *
  * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
  *
  * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
+ * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -36,11 +37,11 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
 		if (ctx->seen & SEEN_DATAREF) {
 			/* If we call any helpers (for loads), save LR */
 			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
-			PPC_STD(0, 1, 16);
+			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
 
 			/* Back up non-volatile regs. */
-			PPC_STD(r_D, 1, -(8*(32-r_D)));
-			PPC_STD(r_HL, 1, -(8*(32-r_HL)));
+			PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
+			PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
 		}
 		if (ctx->seen & SEEN_MEM) {
 			/*
@@ -49,11 +50,10 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
 			 */
 			for (i = r_M; i < (r_M+16); i++) {
 				if (ctx->seen & (1 << (i-r_M)))
-					PPC_STD(i, 1, -(8*(32-i)));
+					PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
 			}
 		}
-		EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
-		     (-BPF_PPC_STACKFRAME & 0xfffc));
+		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
 	}
 
 	if (ctx->seen & SEEN_DATAREF) {
@@ -67,7 +67,7 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
 							 data_len));
 		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
 		PPC_SUB(r_HL, r_HL, r_scratch1);
-		PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
+		PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
 	}
 
 	if (ctx->seen & SEEN_XREG) {
@@ -99,16 +99,16 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
 		PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
 		if (ctx->seen & SEEN_DATAREF) {
-			PPC_LD(0, 1, 16);
+			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
 			PPC_MTLR(0);
-			PPC_LD(r_D, 1, -(8*(32-r_D)));
-			PPC_LD(r_HL, 1, -(8*(32-r_HL)));
+			PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
+			PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
 		}
 		if (ctx->seen & SEEN_MEM) {
 			/* Restore any saved non-vol registers */
 			for (i = r_M; i < (r_M+16); i++) {
 				if (ctx->seen & (1 << (i-r_M)))
-					PPC_LD(i, 1, -(8*(32-i)));
+					PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
 			}
 		}
 	}
@@ -355,7 +355,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 							  ifindex) != 4);
 			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
 							  type) != 2);
-			PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
+			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
 								dev));
 			PPC_CMPDI(r_scratch1, 0);
 			if (ctx->pc_ret0 != -1) {
@@ -437,7 +437,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		common_load:
 			/* Load from [K]. */
 			ctx->seen |= SEEN_DATAREF;
-			PPC_LI64(r_scratch1, func);
+			PPC_FUNC_ADDR(r_scratch1, func);
 			PPC_MTLR(r_scratch1);
 			PPC_LI32(r_addr, K);
 			PPC_BLRL();
@@ -463,7 +463,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			 * in the helper functions.
 			 */
 			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
-			PPC_LI64(r_scratch1, func);
+			PPC_FUNC_ADDR(r_scratch1, func);
 			PPC_MTLR(r_scratch1);
 			PPC_ADDI(r_addr, r_X, IMM_L(K));
 			if (K >= 32768)
@@ -685,9 +685,11 @@ void bpf_jit_compile(struct bpf_prog *fp)
 
 	if (image) {
 		bpf_flush_icache(code_base, code_base + (proglen/4));
+#ifdef CONFIG_PPC64
 		/* Function descriptor nastiness: Address + TOC */
 		((u64 *)image)[0] = (u64)code_base;
 		((u64 *)image)[1] = local_paca->kernel_toc;
+#endif
 		fp->bpf_func = (void *)image;
 		fp->jited = true;
 	}
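
Note: the PPC_BPF_STL/PPC_BPF_LL/PPC_BPF_STLU, PPC_LL_OFFS, PPC_FUNC_ADDR and REG_SZ helpers used above come from arch/powerpc/net/bpf_jit.h, which is outside this diffstat (it is limited to bpf_jit_comp.c). As a rough orientation only, not the actual header, they are assumed to pick the doubleword forms on ppc64 and the word forms on ppc32, along these lines:

/*
 * Sketch (assumed, not part of this diff) of the width-agnostic
 * "load/store a register-sized value" macros: 64-bit std/ld on ppc64,
 * 32-bit stw/lwz on ppc32.
 */
#ifdef CONFIG_PPC64
#define REG_SZ			8	/* assumed: GPR width in bytes */
#define PPC_BPF_LL(r, base, i)	PPC_LD(r, base, i)	/* load reg-sized */
#define PPC_BPF_STL(r, base, i)	PPC_STD(r, base, i)	/* store reg-sized */
#define PPC_BPF_STLU(r, base, i) PPC_STDU(r, base, i)	/* store with update */
#define PPC_LL_OFFS(r, base, i)	PPC_LD_OFFS(r, base, i)
#else
#define REG_SZ			4
#define PPC_BPF_LL(r, base, i)	PPC_LWZ(r, base, i)
#define PPC_BPF_STL(r, base, i)	PPC_STW(r, base, i)
#define PPC_BPF_STLU(r, base, i) PPC_STWU(r, base, i)
#define PPC_LL_OFFS(r, base, i)	PPC_LWZ_OFFS(r, base, i)
#endif

In the same spirit, PPC_LR_STKOFF hides the differing link-register save slot in the two stack-frame layouts, and PPC_FUNC_ADDR replaces the ppc64-only PPC_LI64 when materializing a helper's address. The new #ifdef CONFIG_PPC64 in bpf_jit_compile() reflects that only the 64-bit ABI calls through a function descriptor (entry address plus TOC pointer); on ppc32 the start of the generated image is the entry point itself.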