about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorNaveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>2016-06-22 12:25:06 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2016-06-24 01:15:51 -0400
commit6ac0ba5a4f82b40b4f6b3a75e7e4f0a15a3d7b9b (patch)
tree5f063709a6d9456683ecf869fc6ed3b8cde405f5
parentcef1e8cdcdb50513e7d3351f536e7e1e3e347827 (diff)
powerpc/bpf/jit: Isolate classic BPF JIT specifics into a separate header
Break out classic BPF JIT specifics into a separate header in preparation
for eBPF JIT implementation. Note that ppc32 will still need the classic
BPF JIT.

Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--arch/powerpc/net/bpf_jit.h121
-rw-r--r--arch/powerpc/net/bpf_jit32.h139
-rw-r--r--arch/powerpc/net/bpf_jit_asm.S2
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c2
4 files changed, 143 insertions, 121 deletions
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 9041d3fb9231..313cfafde9bb 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -1,4 +1,5 @@
1/* bpf_jit.h: BPF JIT compiler for PPC64 1/*
2 * bpf_jit.h: BPF JIT compiler for PPC
2 * 3 *
3 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation 4 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
4 * 5 *
@@ -10,66 +11,8 @@
10#ifndef _BPF_JIT_H 11#ifndef _BPF_JIT_H
11#define _BPF_JIT_H 12#define _BPF_JIT_H
12 13
13#ifdef CONFIG_PPC64
14#define BPF_PPC_STACK_R3_OFF 48
15#define BPF_PPC_STACK_LOCALS 32
16#define BPF_PPC_STACK_BASIC (48+64)
17#define BPF_PPC_STACK_SAVE (18*8)
18#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
19 BPF_PPC_STACK_SAVE)
20#define BPF_PPC_SLOWPATH_FRAME (48+64)
21#else
22#define BPF_PPC_STACK_R3_OFF 24
23#define BPF_PPC_STACK_LOCALS 16
24#define BPF_PPC_STACK_BASIC (24+32)
25#define BPF_PPC_STACK_SAVE (18*4)
26#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
27 BPF_PPC_STACK_SAVE)
28#define BPF_PPC_SLOWPATH_FRAME (24+32)
29#endif
30
31#define REG_SZ (BITS_PER_LONG/8)
32
33/*
34 * Generated code register usage:
35 *
36 * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
37 *
38 * skb r3 (Entry parameter)
39 * A register r4
40 * X register r5
41 * addr param r6
42 * r7-r10 scratch
43 * skb->data r14
44 * skb headlen r15 (skb->len - skb->data_len)
45 * m[0] r16
46 * m[...] ...
47 * m[15] r31
48 */
49#define r_skb 3
50#define r_ret 3
51#define r_A 4
52#define r_X 5
53#define r_addr 6
54#define r_scratch1 7
55#define r_scratch2 8
56#define r_D 14
57#define r_HL 15
58#define r_M 16
59
60#ifndef __ASSEMBLY__ 14#ifndef __ASSEMBLY__
61 15
62/*
63 * Assembly helpers from arch/powerpc/net/bpf_jit.S:
64 */
65#define DECLARE_LOAD_FUNC(func) \
66 extern u8 func[], func##_negative_offset[], func##_positive_offset[]
67
68DECLARE_LOAD_FUNC(sk_load_word);
69DECLARE_LOAD_FUNC(sk_load_half);
70DECLARE_LOAD_FUNC(sk_load_byte);
71DECLARE_LOAD_FUNC(sk_load_byte_msh);
72
73#ifdef CONFIG_PPC64 16#ifdef CONFIG_PPC64
74#define FUNCTION_DESCR_SIZE 24 17#define FUNCTION_DESCR_SIZE 24
75#else 18#else
@@ -131,46 +74,6 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
131#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) 74#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
132#endif 75#endif
133 76
134/* Convenience helpers for the above with 'far' offsets: */
135#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \
136 else { PPC_ADDIS(r, base, IMM_HA(i)); \
137 PPC_LBZ(r, r, IMM_L(i)); } } while(0)
138
139#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
140 else { PPC_ADDIS(r, base, IMM_HA(i)); \
141 PPC_LD(r, r, IMM_L(i)); } } while(0)
142
143#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \
144 else { PPC_ADDIS(r, base, IMM_HA(i)); \
145 PPC_LWZ(r, r, IMM_L(i)); } } while(0)
146
147#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \
148 else { PPC_ADDIS(r, base, IMM_HA(i)); \
149 PPC_LHZ(r, r, IMM_L(i)); } } while(0)
150
151#ifdef CONFIG_PPC64
152#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
153#else
154#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
155#endif
156
157#ifdef CONFIG_SMP
158#ifdef CONFIG_PPC64
159#define PPC_BPF_LOAD_CPU(r) \
160 do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \
161 PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \
162 } while (0)
163#else
164#define PPC_BPF_LOAD_CPU(r) \
165 do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); \
166 PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)), \
167 offsetof(struct thread_info, cpu)); \
168 } while(0)
169#endif
170#else
171#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0)
172#endif
173
174#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) 77#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
175#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) 78#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
176#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i)) 79#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i))
@@ -273,14 +176,6 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
273#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0) 176#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
274#endif 177#endif
275 178
276#define PPC_LHBRX_OFFS(r, base, i) \
277 do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
278#ifdef __LITTLE_ENDIAN__
279#define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i)
280#else
281#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
282#endif
283
284static inline bool is_nearbranch(int offset) 179static inline bool is_nearbranch(int offset)
285{ 180{
286 return (offset < 32768) && (offset >= -32768); 181 return (offset < 32768) && (offset >= -32768);
@@ -317,18 +212,6 @@ static inline bool is_nearbranch(int offset)
317#define COND_NE (CR0_EQ | COND_CMP_FALSE) 212#define COND_NE (CR0_EQ | COND_CMP_FALSE)
318#define COND_LT (CR0_LT | COND_CMP_TRUE) 213#define COND_LT (CR0_LT | COND_CMP_TRUE)
319 214
320#define SEEN_DATAREF 0x10000 /* might call external helpers */
321#define SEEN_XREG 0x20000 /* X reg is used */
322#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
323 * storage */
324#define SEEN_MEM_MSK 0x0ffff
325
326struct codegen_context {
327 unsigned int seen;
328 unsigned int idx;
329 int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
330};
331
332#endif 215#endif
333 216
334#endif 217#endif
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
new file mode 100644
index 000000000000..a8cd7e289ecd
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit32.h
@@ -0,0 +1,139 @@
1/*
2 * bpf_jit32.h: BPF JIT compiler for PPC
3 *
4 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
5 *
6 * Split from bpf_jit.h
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; version 2
11 * of the License.
12 */
13#ifndef _BPF_JIT32_H
14#define _BPF_JIT32_H
15
16#include "bpf_jit.h"
17
18#ifdef CONFIG_PPC64
19#define BPF_PPC_STACK_R3_OFF 48
20#define BPF_PPC_STACK_LOCALS 32
21#define BPF_PPC_STACK_BASIC (48+64)
22#define BPF_PPC_STACK_SAVE (18*8)
23#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
24 BPF_PPC_STACK_SAVE)
25#define BPF_PPC_SLOWPATH_FRAME (48+64)
26#else
27#define BPF_PPC_STACK_R3_OFF 24
28#define BPF_PPC_STACK_LOCALS 16
29#define BPF_PPC_STACK_BASIC (24+32)
30#define BPF_PPC_STACK_SAVE (18*4)
31#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
32 BPF_PPC_STACK_SAVE)
33#define BPF_PPC_SLOWPATH_FRAME (24+32)
34#endif
35
36#define REG_SZ (BITS_PER_LONG/8)
37
38/*
39 * Generated code register usage:
40 *
41 * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
42 *
43 * skb r3 (Entry parameter)
44 * A register r4
45 * X register r5
46 * addr param r6
47 * r7-r10 scratch
48 * skb->data r14
49 * skb headlen r15 (skb->len - skb->data_len)
50 * m[0] r16
51 * m[...] ...
52 * m[15] r31
53 */
54#define r_skb 3
55#define r_ret 3
56#define r_A 4
57#define r_X 5
58#define r_addr 6
59#define r_scratch1 7
60#define r_scratch2 8
61#define r_D 14
62#define r_HL 15
63#define r_M 16
64
65#ifndef __ASSEMBLY__
66
67/*
68 * Assembly helpers from arch/powerpc/net/bpf_jit.S:
69 */
70#define DECLARE_LOAD_FUNC(func) \
71 extern u8 func[], func##_negative_offset[], func##_positive_offset[]
72
73DECLARE_LOAD_FUNC(sk_load_word);
74DECLARE_LOAD_FUNC(sk_load_half);
75DECLARE_LOAD_FUNC(sk_load_byte);
76DECLARE_LOAD_FUNC(sk_load_byte_msh);
77
78#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \
79 else { PPC_ADDIS(r, base, IMM_HA(i)); \
80 PPC_LBZ(r, r, IMM_L(i)); } } while(0)
81
82#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
83 else { PPC_ADDIS(r, base, IMM_HA(i)); \
84 PPC_LD(r, r, IMM_L(i)); } } while(0)
85
86#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \
87 else { PPC_ADDIS(r, base, IMM_HA(i)); \
88 PPC_LWZ(r, r, IMM_L(i)); } } while(0)
89
90#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \
91 else { PPC_ADDIS(r, base, IMM_HA(i)); \
92 PPC_LHZ(r, r, IMM_L(i)); } } while(0)
93
94#ifdef CONFIG_PPC64
95#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
96#else
97#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
98#endif
99
100#ifdef CONFIG_SMP
101#ifdef CONFIG_PPC64
102#define PPC_BPF_LOAD_CPU(r) \
103 do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \
104 PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \
105 } while (0)
106#else
107#define PPC_BPF_LOAD_CPU(r) \
108 do { BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); \
109 PPC_LHZ_OFFS(r, (1 & ~(THREAD_SIZE - 1)), \
110 offsetof(struct thread_info, cpu)); \
111 } while(0)
112#endif
113#else
114#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0)
115#endif
116
117#define PPC_LHBRX_OFFS(r, base, i) \
118 do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
119#ifdef __LITTLE_ENDIAN__
120#define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i)
121#else
122#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
123#endif
124
125#define SEEN_DATAREF 0x10000 /* might call external helpers */
126#define SEEN_XREG 0x20000 /* X reg is used */
127#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
128 * storage */
129#define SEEN_MEM_MSK 0x0ffff
130
131struct codegen_context {
132 unsigned int seen;
133 unsigned int idx;
134 int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
135};
136
137#endif
138
139#endif
diff --git a/arch/powerpc/net/bpf_jit_asm.S b/arch/powerpc/net/bpf_jit_asm.S
index 8ff5a3b5d1c3..3dd9c43d40c9 100644
--- a/arch/powerpc/net/bpf_jit_asm.S
+++ b/arch/powerpc/net/bpf_jit_asm.S
@@ -10,7 +10,7 @@
10 */ 10 */
11 11
12#include <asm/ppc_asm.h> 12#include <asm/ppc_asm.h>
13#include "bpf_jit.h" 13#include "bpf_jit32.h"
14 14
15/* 15/*
16 * All of these routines are called directly from generated code, 16 * All of these routines are called directly from generated code,
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 6012aac70e2f..7e706f36e364 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -16,7 +16,7 @@
16#include <linux/filter.h> 16#include <linux/filter.h>
17#include <linux/if_vlan.h> 17#include <linux/if_vlan.h>
18 18
19#include "bpf_jit.h" 19#include "bpf_jit32.h"
20 20
21int bpf_jit_enable __read_mostly; 21int bpf_jit_enable __read_mostly;
22 22