diff options
author | Denis Kirjanov <kda@linux-powerpc.org> | 2015-02-17 02:04:40 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-02-20 15:19:43 -0500 |
commit | 09ca5ab23eca61a6f79076d38ab5a17da07533dc (patch) | |
tree | 323540b03afe992e403155629726547cb76f5411 | |
parent | 693930d69c67145dcdf512fe863dbb1095b744b9 (diff) |
ppc: bpf: update jit to use compatibility macros
Use helpers from asm-compat.h to wrap up assembly mnemonics.
Signed-off-by: Denis Kirjanov <kda@linux-powerpc.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | arch/powerpc/net/bpf_jit.h | 47 | ||||
-rw-r--r-- | arch/powerpc/net/bpf_jit_64.S | 70 | ||||
-rw-r--r-- | arch/powerpc/net/bpf_jit_comp.c | 32 |
3 files changed, 98 insertions, 51 deletions
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index c406aa95b2bc..2d5e71577210 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h | |||
@@ -10,12 +10,25 @@ | |||
10 | #ifndef _BPF_JIT_H | 10 | #ifndef _BPF_JIT_H |
11 | #define _BPF_JIT_H | 11 | #define _BPF_JIT_H |
12 | 12 | ||
13 | #ifdef CONFIG_PPC64 | ||
14 | #define BPF_PPC_STACK_R3_OFF 48 | ||
13 | #define BPF_PPC_STACK_LOCALS 32 | 15 | #define BPF_PPC_STACK_LOCALS 32 |
14 | #define BPF_PPC_STACK_BASIC (48+64) | 16 | #define BPF_PPC_STACK_BASIC (48+64) |
15 | #define BPF_PPC_STACK_SAVE (18*8) | 17 | #define BPF_PPC_STACK_SAVE (18*8) |
16 | #define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ | 18 | #define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ |
17 | BPF_PPC_STACK_SAVE) | 19 | BPF_PPC_STACK_SAVE) |
18 | #define BPF_PPC_SLOWPATH_FRAME (48+64) | 20 | #define BPF_PPC_SLOWPATH_FRAME (48+64) |
21 | #else | ||
22 | #define BPF_PPC_STACK_R3_OFF 24 | ||
23 | #define BPF_PPC_STACK_LOCALS 16 | ||
24 | #define BPF_PPC_STACK_BASIC (24+32) | ||
25 | #define BPF_PPC_STACK_SAVE (18*4) | ||
26 | #define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \ | ||
27 | BPF_PPC_STACK_SAVE) | ||
28 | #define BPF_PPC_SLOWPATH_FRAME (24+32) | ||
29 | #endif | ||
30 | |||
31 | #define REG_SZ (BITS_PER_LONG/8) | ||
19 | 32 | ||
20 | /* | 33 | /* |
21 | * Generated code register usage: | 34 | * Generated code register usage: |
@@ -57,7 +70,11 @@ DECLARE_LOAD_FUNC(sk_load_half); | |||
57 | DECLARE_LOAD_FUNC(sk_load_byte); | 70 | DECLARE_LOAD_FUNC(sk_load_byte); |
58 | DECLARE_LOAD_FUNC(sk_load_byte_msh); | 71 | DECLARE_LOAD_FUNC(sk_load_byte_msh); |
59 | 72 | ||
73 | #ifdef CONFIG_PPC64 | ||
60 | #define FUNCTION_DESCR_SIZE 24 | 74 | #define FUNCTION_DESCR_SIZE 24 |
75 | #else | ||
76 | #define FUNCTION_DESCR_SIZE 0 | ||
77 | #endif | ||
61 | 78 | ||
62 | /* | 79 | /* |
63 | * 16-bit immediate helper macros: HA() is for use with sign-extending instrs | 80 | * 16-bit immediate helper macros: HA() is for use with sign-extending instrs |
@@ -86,7 +103,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); | |||
86 | #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) | 103 | #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i) |
87 | #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ | 104 | #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \ |
88 | ___PPC_RA(base) | ((i) & 0xfffc)) | 105 | ___PPC_RA(base) | ((i) & 0xfffc)) |
89 | 106 | #define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \ | |
107 | ___PPC_RA(base) | ((i) & 0xfffc)) | ||
108 | #define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \ | ||
109 | ___PPC_RA(base) | ((i) & 0xfffc)) | ||
110 | #define PPC_STWU(r, base, i) EMIT(PPC_INST_STWU | ___PPC_RS(r) | \ | ||
111 | ___PPC_RA(base) | ((i) & 0xfffc)) | ||
90 | 112 | ||
91 | #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ | 113 | #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \ |
92 | ___PPC_RA(base) | IMM_L(i)) | 114 | ___PPC_RA(base) | IMM_L(i)) |
@@ -98,6 +120,17 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); | |||
98 | ___PPC_RA(base) | IMM_L(i)) | 120 | ___PPC_RA(base) | IMM_L(i)) |
99 | #define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \ | 121 | #define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \ |
100 | ___PPC_RA(base) | ___PPC_RB(b)) | 122 | ___PPC_RA(base) | ___PPC_RB(b)) |
123 | |||
124 | #ifdef CONFIG_PPC64 | ||
125 | #define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0) | ||
126 | #define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0) | ||
127 | #define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0) | ||
128 | #else | ||
129 | #define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0) | ||
130 | #define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0) | ||
131 | #define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0) | ||
132 | #endif | ||
133 | |||
101 | /* Convenience helpers for the above with 'far' offsets: */ | 134 | /* Convenience helpers for the above with 'far' offsets: */ |
102 | #define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \ | 135 | #define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \ |
103 | else { PPC_ADDIS(r, base, IMM_HA(i)); \ | 136 | else { PPC_ADDIS(r, base, IMM_HA(i)); \ |
@@ -115,6 +148,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); | |||
115 | else { PPC_ADDIS(r, base, IMM_HA(i)); \ | 148 | else { PPC_ADDIS(r, base, IMM_HA(i)); \ |
116 | PPC_LHZ(r, r, IMM_L(i)); } } while(0) | 149 | PPC_LHZ(r, r, IMM_L(i)); } } while(0) |
117 | 150 | ||
151 | #ifdef CONFIG_PPC64 | ||
152 | #define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0) | ||
153 | #else | ||
154 | #define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0) | ||
155 | #endif | ||
156 | |||
118 | #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) | 157 | #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i)) |
119 | #define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) | 158 | #define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i)) |
120 | #define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i)) | 159 | #define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i)) |
@@ -196,6 +235,12 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh); | |||
196 | PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \ | 235 | PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \ |
197 | } } while (0); | 236 | } } while (0); |
198 | 237 | ||
238 | #ifdef CONFIG_PPC64 | ||
239 | #define PPC_FUNC_ADDR(d,i) do { PPC_LI64(d, i); } while(0) | ||
240 | #else | ||
241 | #define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0) | ||
242 | #endif | ||
243 | |||
199 | #define PPC_LHBRX_OFFS(r, base, i) \ | 244 | #define PPC_LHBRX_OFFS(r, base, i) \ |
200 | do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0) | 245 | do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0) |
201 | #ifdef __LITTLE_ENDIAN__ | 246 | #ifdef __LITTLE_ENDIAN__ |
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S index 8f87d9217122..8ff5a3b5d1c3 100644 --- a/arch/powerpc/net/bpf_jit_64.S +++ b/arch/powerpc/net/bpf_jit_64.S | |||
@@ -34,13 +34,13 @@ | |||
34 | */ | 34 | */ |
35 | .globl sk_load_word | 35 | .globl sk_load_word |
36 | sk_load_word: | 36 | sk_load_word: |
37 | cmpdi r_addr, 0 | 37 | PPC_LCMPI r_addr, 0 |
38 | blt bpf_slow_path_word_neg | 38 | blt bpf_slow_path_word_neg |
39 | .globl sk_load_word_positive_offset | 39 | .globl sk_load_word_positive_offset |
40 | sk_load_word_positive_offset: | 40 | sk_load_word_positive_offset: |
41 | /* Are we accessing past headlen? */ | 41 | /* Are we accessing past headlen? */ |
42 | subi r_scratch1, r_HL, 4 | 42 | subi r_scratch1, r_HL, 4 |
43 | cmpd r_scratch1, r_addr | 43 | PPC_LCMP r_scratch1, r_addr |
44 | blt bpf_slow_path_word | 44 | blt bpf_slow_path_word |
45 | /* Nope, just hitting the header. cr0 here is eq or gt! */ | 45 | /* Nope, just hitting the header. cr0 here is eq or gt! */ |
46 | #ifdef __LITTLE_ENDIAN__ | 46 | #ifdef __LITTLE_ENDIAN__ |
@@ -52,12 +52,12 @@ sk_load_word_positive_offset: | |||
52 | 52 | ||
53 | .globl sk_load_half | 53 | .globl sk_load_half |
54 | sk_load_half: | 54 | sk_load_half: |
55 | cmpdi r_addr, 0 | 55 | PPC_LCMPI r_addr, 0 |
56 | blt bpf_slow_path_half_neg | 56 | blt bpf_slow_path_half_neg |
57 | .globl sk_load_half_positive_offset | 57 | .globl sk_load_half_positive_offset |
58 | sk_load_half_positive_offset: | 58 | sk_load_half_positive_offset: |
59 | subi r_scratch1, r_HL, 2 | 59 | subi r_scratch1, r_HL, 2 |
60 | cmpd r_scratch1, r_addr | 60 | PPC_LCMP r_scratch1, r_addr |
61 | blt bpf_slow_path_half | 61 | blt bpf_slow_path_half |
62 | #ifdef __LITTLE_ENDIAN__ | 62 | #ifdef __LITTLE_ENDIAN__ |
63 | lhbrx r_A, r_D, r_addr | 63 | lhbrx r_A, r_D, r_addr |
@@ -68,11 +68,11 @@ sk_load_half_positive_offset: | |||
68 | 68 | ||
69 | .globl sk_load_byte | 69 | .globl sk_load_byte |
70 | sk_load_byte: | 70 | sk_load_byte: |
71 | cmpdi r_addr, 0 | 71 | PPC_LCMPI r_addr, 0 |
72 | blt bpf_slow_path_byte_neg | 72 | blt bpf_slow_path_byte_neg |
73 | .globl sk_load_byte_positive_offset | 73 | .globl sk_load_byte_positive_offset |
74 | sk_load_byte_positive_offset: | 74 | sk_load_byte_positive_offset: |
75 | cmpd r_HL, r_addr | 75 | PPC_LCMP r_HL, r_addr |
76 | ble bpf_slow_path_byte | 76 | ble bpf_slow_path_byte |
77 | lbzx r_A, r_D, r_addr | 77 | lbzx r_A, r_D, r_addr |
78 | blr | 78 | blr |
@@ -83,11 +83,11 @@ sk_load_byte_positive_offset: | |||
83 | */ | 83 | */ |
84 | .globl sk_load_byte_msh | 84 | .globl sk_load_byte_msh |
85 | sk_load_byte_msh: | 85 | sk_load_byte_msh: |
86 | cmpdi r_addr, 0 | 86 | PPC_LCMPI r_addr, 0 |
87 | blt bpf_slow_path_byte_msh_neg | 87 | blt bpf_slow_path_byte_msh_neg |
88 | .globl sk_load_byte_msh_positive_offset | 88 | .globl sk_load_byte_msh_positive_offset |
89 | sk_load_byte_msh_positive_offset: | 89 | sk_load_byte_msh_positive_offset: |
90 | cmpd r_HL, r_addr | 90 | PPC_LCMP r_HL, r_addr |
91 | ble bpf_slow_path_byte_msh | 91 | ble bpf_slow_path_byte_msh |
92 | lbzx r_X, r_D, r_addr | 92 | lbzx r_X, r_D, r_addr |
93 | rlwinm r_X, r_X, 2, 32-4-2, 31-2 | 93 | rlwinm r_X, r_X, 2, 32-4-2, 31-2 |
@@ -101,13 +101,13 @@ sk_load_byte_msh_positive_offset: | |||
101 | */ | 101 | */ |
102 | #define bpf_slow_path_common(SIZE) \ | 102 | #define bpf_slow_path_common(SIZE) \ |
103 | mflr r0; \ | 103 | mflr r0; \ |
104 | std r0, 16(r1); \ | 104 | PPC_STL r0, PPC_LR_STKOFF(r1); \ |
105 | /* R3 goes in parameter space of caller's frame */ \ | 105 | /* R3 goes in parameter space of caller's frame */ \ |
106 | std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ | 106 | PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \ |
107 | std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ | 107 | PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \ |
108 | std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ | 108 | PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \ |
109 | addi r5, r1, BPF_PPC_STACK_BASIC+(2*8); \ | 109 | addi r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ); \ |
110 | stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ | 110 | PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ |
111 | /* R3 = r_skb, as passed */ \ | 111 | /* R3 = r_skb, as passed */ \ |
112 | mr r4, r_addr; \ | 112 | mr r4, r_addr; \ |
113 | li r6, SIZE; \ | 113 | li r6, SIZE; \ |
@@ -115,19 +115,19 @@ sk_load_byte_msh_positive_offset: | |||
115 | nop; \ | 115 | nop; \ |
116 | /* R3 = 0 on success */ \ | 116 | /* R3 = 0 on success */ \ |
117 | addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ | 117 | addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ |
118 | ld r0, 16(r1); \ | 118 | PPC_LL r0, PPC_LR_STKOFF(r1); \ |
119 | ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ | 119 | PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \ |
120 | ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ | 120 | PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \ |
121 | mtlr r0; \ | 121 | mtlr r0; \ |
122 | cmpdi r3, 0; \ | 122 | PPC_LCMPI r3, 0; \ |
123 | blt bpf_error; /* cr0 = LT */ \ | 123 | blt bpf_error; /* cr0 = LT */ \ |
124 | ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ | 124 | PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \ |
125 | /* Great success! */ | 125 | /* Great success! */ |
126 | 126 | ||
127 | bpf_slow_path_word: | 127 | bpf_slow_path_word: |
128 | bpf_slow_path_common(4) | 128 | bpf_slow_path_common(4) |
129 | /* Data value is on stack, and cr0 != LT */ | 129 | /* Data value is on stack, and cr0 != LT */ |
130 | lwz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1) | 130 | lwz r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1) |
131 | blr | 131 | blr |
132 | 132 | ||
133 | bpf_slow_path_half: | 133 | bpf_slow_path_half: |
@@ -154,12 +154,12 @@ bpf_slow_path_byte_msh: | |||
154 | */ | 154 | */ |
155 | #define sk_negative_common(SIZE) \ | 155 | #define sk_negative_common(SIZE) \ |
156 | mflr r0; \ | 156 | mflr r0; \ |
157 | std r0, 16(r1); \ | 157 | PPC_STL r0, PPC_LR_STKOFF(r1); \ |
158 | /* R3 goes in parameter space of caller's frame */ \ | 158 | /* R3 goes in parameter space of caller's frame */ \ |
159 | std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ | 159 | PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \ |
160 | std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ | 160 | PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \ |
161 | std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ | 161 | PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \ |
162 | stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ | 162 | PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ |
163 | /* R3 = r_skb, as passed */ \ | 163 | /* R3 = r_skb, as passed */ \ |
164 | mr r4, r_addr; \ | 164 | mr r4, r_addr; \ |
165 | li r5, SIZE; \ | 165 | li r5, SIZE; \ |
@@ -167,19 +167,19 @@ bpf_slow_path_byte_msh: | |||
167 | nop; \ | 167 | nop; \ |
168 | /* R3 != 0 on success */ \ | 168 | /* R3 != 0 on success */ \ |
169 | addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ | 169 | addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ |
170 | ld r0, 16(r1); \ | 170 | PPC_LL r0, PPC_LR_STKOFF(r1); \ |
171 | ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ | 171 | PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \ |
172 | ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ | 172 | PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \ |
173 | mtlr r0; \ | 173 | mtlr r0; \ |
174 | cmpldi r3, 0; \ | 174 | PPC_LCMPLI r3, 0; \ |
175 | beq bpf_error_slow; /* cr0 = EQ */ \ | 175 | beq bpf_error_slow; /* cr0 = EQ */ \ |
176 | mr r_addr, r3; \ | 176 | mr r_addr, r3; \ |
177 | ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ | 177 | PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \ |
178 | /* Great success! */ | 178 | /* Great success! */ |
179 | 179 | ||
180 | bpf_slow_path_word_neg: | 180 | bpf_slow_path_word_neg: |
181 | lis r_scratch1,-32 /* SKF_LL_OFF */ | 181 | lis r_scratch1,-32 /* SKF_LL_OFF */ |
182 | cmpd r_addr, r_scratch1 /* addr < SKF_* */ | 182 | PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */ |
183 | blt bpf_error /* cr0 = LT */ | 183 | blt bpf_error /* cr0 = LT */ |
184 | .globl sk_load_word_negative_offset | 184 | .globl sk_load_word_negative_offset |
185 | sk_load_word_negative_offset: | 185 | sk_load_word_negative_offset: |
@@ -189,7 +189,7 @@ sk_load_word_negative_offset: | |||
189 | 189 | ||
190 | bpf_slow_path_half_neg: | 190 | bpf_slow_path_half_neg: |
191 | lis r_scratch1,-32 /* SKF_LL_OFF */ | 191 | lis r_scratch1,-32 /* SKF_LL_OFF */ |
192 | cmpd r_addr, r_scratch1 /* addr < SKF_* */ | 192 | PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */ |
193 | blt bpf_error /* cr0 = LT */ | 193 | blt bpf_error /* cr0 = LT */ |
194 | .globl sk_load_half_negative_offset | 194 | .globl sk_load_half_negative_offset |
195 | sk_load_half_negative_offset: | 195 | sk_load_half_negative_offset: |
@@ -199,7 +199,7 @@ sk_load_half_negative_offset: | |||
199 | 199 | ||
200 | bpf_slow_path_byte_neg: | 200 | bpf_slow_path_byte_neg: |
201 | lis r_scratch1,-32 /* SKF_LL_OFF */ | 201 | lis r_scratch1,-32 /* SKF_LL_OFF */ |
202 | cmpd r_addr, r_scratch1 /* addr < SKF_* */ | 202 | PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */ |
203 | blt bpf_error /* cr0 = LT */ | 203 | blt bpf_error /* cr0 = LT */ |
204 | .globl sk_load_byte_negative_offset | 204 | .globl sk_load_byte_negative_offset |
205 | sk_load_byte_negative_offset: | 205 | sk_load_byte_negative_offset: |
@@ -209,7 +209,7 @@ sk_load_byte_negative_offset: | |||
209 | 209 | ||
210 | bpf_slow_path_byte_msh_neg: | 210 | bpf_slow_path_byte_msh_neg: |
211 | lis r_scratch1,-32 /* SKF_LL_OFF */ | 211 | lis r_scratch1,-32 /* SKF_LL_OFF */ |
212 | cmpd r_addr, r_scratch1 /* addr < SKF_* */ | 212 | PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */ |
213 | blt bpf_error /* cr0 = LT */ | 213 | blt bpf_error /* cr0 = LT */ |
214 | .globl sk_load_byte_msh_negative_offset | 214 | .globl sk_load_byte_msh_negative_offset |
215 | sk_load_byte_msh_negative_offset: | 215 | sk_load_byte_msh_negative_offset: |
@@ -221,7 +221,7 @@ sk_load_byte_msh_negative_offset: | |||
221 | bpf_error_slow: | 221 | bpf_error_slow: |
222 | /* fabricate a cr0 = lt */ | 222 | /* fabricate a cr0 = lt */ |
223 | li r_scratch1, -1 | 223 | li r_scratch1, -1 |
224 | cmpdi r_scratch1, 0 | 224 | PPC_LCMPI r_scratch1, 0 |
225 | bpf_error: | 225 | bpf_error: |
226 | /* Entered with cr0 = lt */ | 226 | /* Entered with cr0 = lt */ |
227 | li r3, 0 | 227 | li r3, 0 |
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index d1916b577f2c..8b2926850125 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c | |||
@@ -1,8 +1,9 @@ | |||
1 | /* bpf_jit_comp.c: BPF JIT compiler for PPC64 | 1 | /* bpf_jit_comp.c: BPF JIT compiler |
2 | * | 2 | * |
3 | * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation | 3 | * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation |
4 | * | 4 | * |
5 | * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com) | 5 | * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com) |
6 | * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org> | ||
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
@@ -36,11 +37,11 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image, | |||
36 | if (ctx->seen & SEEN_DATAREF) { | 37 | if (ctx->seen & SEEN_DATAREF) { |
37 | /* If we call any helpers (for loads), save LR */ | 38 | /* If we call any helpers (for loads), save LR */ |
38 | EMIT(PPC_INST_MFLR | __PPC_RT(R0)); | 39 | EMIT(PPC_INST_MFLR | __PPC_RT(R0)); |
39 | PPC_STD(0, 1, 16); | 40 | PPC_BPF_STL(0, 1, PPC_LR_STKOFF); |
40 | 41 | ||
41 | /* Back up non-volatile regs. */ | 42 | /* Back up non-volatile regs. */ |
42 | PPC_STD(r_D, 1, -(8*(32-r_D))); | 43 | PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D))); |
43 | PPC_STD(r_HL, 1, -(8*(32-r_HL))); | 44 | PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL))); |
44 | } | 45 | } |
45 | if (ctx->seen & SEEN_MEM) { | 46 | if (ctx->seen & SEEN_MEM) { |
46 | /* | 47 | /* |
@@ -49,11 +50,10 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image, | |||
49 | */ | 50 | */ |
50 | for (i = r_M; i < (r_M+16); i++) { | 51 | for (i = r_M; i < (r_M+16); i++) { |
51 | if (ctx->seen & (1 << (i-r_M))) | 52 | if (ctx->seen & (1 << (i-r_M))) |
52 | PPC_STD(i, 1, -(8*(32-i))); | 53 | PPC_BPF_STL(i, 1, -(REG_SZ*(32-i))); |
53 | } | 54 | } |
54 | } | 55 | } |
55 | EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) | | 56 | PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME); |
56 | (-BPF_PPC_STACKFRAME & 0xfffc)); | ||
57 | } | 57 | } |
58 | 58 | ||
59 | if (ctx->seen & SEEN_DATAREF) { | 59 | if (ctx->seen & SEEN_DATAREF) { |
@@ -67,7 +67,7 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image, | |||
67 | data_len)); | 67 | data_len)); |
68 | PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len)); | 68 | PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len)); |
69 | PPC_SUB(r_HL, r_HL, r_scratch1); | 69 | PPC_SUB(r_HL, r_HL, r_scratch1); |
70 | PPC_LD_OFFS(r_D, r_skb, offsetof(struct sk_buff, data)); | 70 | PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data)); |
71 | } | 71 | } |
72 | 72 | ||
73 | if (ctx->seen & SEEN_XREG) { | 73 | if (ctx->seen & SEEN_XREG) { |
@@ -99,16 +99,16 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) | |||
99 | if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) { | 99 | if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) { |
100 | PPC_ADDI(1, 1, BPF_PPC_STACKFRAME); | 100 | PPC_ADDI(1, 1, BPF_PPC_STACKFRAME); |
101 | if (ctx->seen & SEEN_DATAREF) { | 101 | if (ctx->seen & SEEN_DATAREF) { |
102 | PPC_LD(0, 1, 16); | 102 | PPC_BPF_LL(0, 1, PPC_LR_STKOFF); |
103 | PPC_MTLR(0); | 103 | PPC_MTLR(0); |
104 | PPC_LD(r_D, 1, -(8*(32-r_D))); | 104 | PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D))); |
105 | PPC_LD(r_HL, 1, -(8*(32-r_HL))); | 105 | PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL))); |
106 | } | 106 | } |
107 | if (ctx->seen & SEEN_MEM) { | 107 | if (ctx->seen & SEEN_MEM) { |
108 | /* Restore any saved non-vol registers */ | 108 | /* Restore any saved non-vol registers */ |
109 | for (i = r_M; i < (r_M+16); i++) { | 109 | for (i = r_M; i < (r_M+16); i++) { |
110 | if (ctx->seen & (1 << (i-r_M))) | 110 | if (ctx->seen & (1 << (i-r_M))) |
111 | PPC_LD(i, 1, -(8*(32-i))); | 111 | PPC_BPF_LL(i, 1, -(REG_SZ*(32-i))); |
112 | } | 112 | } |
113 | } | 113 | } |
114 | } | 114 | } |
@@ -355,7 +355,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, | |||
355 | ifindex) != 4); | 355 | ifindex) != 4); |
356 | BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, | 356 | BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, |
357 | type) != 2); | 357 | type) != 2); |
358 | PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, | 358 | PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, |
359 | dev)); | 359 | dev)); |
360 | PPC_CMPDI(r_scratch1, 0); | 360 | PPC_CMPDI(r_scratch1, 0); |
361 | if (ctx->pc_ret0 != -1) { | 361 | if (ctx->pc_ret0 != -1) { |
@@ -437,7 +437,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, | |||
437 | common_load: | 437 | common_load: |
438 | /* Load from [K]. */ | 438 | /* Load from [K]. */ |
439 | ctx->seen |= SEEN_DATAREF; | 439 | ctx->seen |= SEEN_DATAREF; |
440 | PPC_LI64(r_scratch1, func); | 440 | PPC_FUNC_ADDR(r_scratch1, func); |
441 | PPC_MTLR(r_scratch1); | 441 | PPC_MTLR(r_scratch1); |
442 | PPC_LI32(r_addr, K); | 442 | PPC_LI32(r_addr, K); |
443 | PPC_BLRL(); | 443 | PPC_BLRL(); |
@@ -463,7 +463,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, | |||
463 | * in the helper functions. | 463 | * in the helper functions. |
464 | */ | 464 | */ |
465 | ctx->seen |= SEEN_DATAREF | SEEN_XREG; | 465 | ctx->seen |= SEEN_DATAREF | SEEN_XREG; |
466 | PPC_LI64(r_scratch1, func); | 466 | PPC_FUNC_ADDR(r_scratch1, func); |
467 | PPC_MTLR(r_scratch1); | 467 | PPC_MTLR(r_scratch1); |
468 | PPC_ADDI(r_addr, r_X, IMM_L(K)); | 468 | PPC_ADDI(r_addr, r_X, IMM_L(K)); |
469 | if (K >= 32768) | 469 | if (K >= 32768) |
@@ -685,9 +685,11 @@ void bpf_jit_compile(struct bpf_prog *fp) | |||
685 | 685 | ||
686 | if (image) { | 686 | if (image) { |
687 | bpf_flush_icache(code_base, code_base + (proglen/4)); | 687 | bpf_flush_icache(code_base, code_base + (proglen/4)); |
688 | #ifdef CONFIG_PPC64 | ||
688 | /* Function descriptor nastiness: Address + TOC */ | 689 | /* Function descriptor nastiness: Address + TOC */ |
689 | ((u64 *)image)[0] = (u64)code_base; | 690 | ((u64 *)image)[0] = (u64)code_base; |
690 | ((u64 *)image)[1] = local_paca->kernel_toc; | 691 | ((u64 *)image)[1] = local_paca->kernel_toc; |
692 | #endif | ||
691 | fp->bpf_func = (void *)image; | 693 | fp->bpf_func = (void *)image; |
692 | fp->jited = true; | 694 | fp->jited = true; |
693 | } | 695 | } |