Diffstat (limited to 'arch/powerpc/net/bpf_jit_64.S')
-rw-r--r--	arch/powerpc/net/bpf_jit_64.S | 108
1 file changed, 95 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index ff4506e85cce..55ba3855a97f 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -31,14 +31,13 @@
  * then branch directly to slow_path_XXX if required. (In fact, could
  * load a spare GPR with the address of slow_path_generic and pass size
  * as an argument, making the call site a mtlr, li and bllr.)
- *
- * Technically, the "is addr < 0" check is unnecessary & slowing down
- * the ABS path, as it's statically checked on generation.
  */
 	.globl	sk_load_word
 sk_load_word:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_word_neg
+	.globl	sk_load_word_positive_offset
+sk_load_word_positive_offset:
 	/* Are we accessing past headlen? */
 	subi	r_scratch1, r_HL, 4
 	cmpd	r_scratch1, r_addr
@@ -51,7 +50,9 @@ sk_load_word:
 	.globl	sk_load_half
 sk_load_half:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_half_neg
+	.globl	sk_load_half_positive_offset
+sk_load_half_positive_offset:
 	subi	r_scratch1, r_HL, 2
 	cmpd	r_scratch1, r_addr
 	blt	bpf_slow_path_half
@@ -61,7 +62,9 @@ sk_load_half:
 	.globl	sk_load_byte
 sk_load_byte:
 	cmpdi	r_addr, 0
-	blt	bpf_error
+	blt	bpf_slow_path_byte_neg
+	.globl	sk_load_byte_positive_offset
+sk_load_byte_positive_offset:
 	cmpd	r_HL, r_addr
 	ble	bpf_slow_path_byte
 	lbzx	r_A, r_D, r_addr
@@ -69,22 +72,20 @@ sk_load_byte:
 
 /*
  * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
- * r_addr is the offset value, already known positive
+ * r_addr is the offset value
  */
 	.globl sk_load_byte_msh
 sk_load_byte_msh:
+	cmpdi	r_addr, 0
+	blt	bpf_slow_path_byte_msh_neg
+	.globl sk_load_byte_msh_positive_offset
+sk_load_byte_msh_positive_offset:
 	cmpd	r_HL, r_addr
 	ble	bpf_slow_path_byte_msh
 	lbzx	r_X, r_D, r_addr
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
 	blr
 
-bpf_error:
-	/* Entered with cr0 = lt */
-	li	r3, 0
-	/* Generated code will 'blt epilogue', returning 0. */
-	blr
-
 /* Call out to skb_copy_bits:
  * We'll need to back up our volatile regs first; we have
  * local variable space at r1+(BPF_PPC_STACK_BASIC).
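The positive-offset slow paths (unchanged here, apart from bpf_error relocating to the end of the file) revolve around skb_copy_bits(), whose kernel contract is int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len): 0 on success, a negative errno when the requested range is unavailable. A standalone C model of that contract, with a flat buffer standing in for the (possibly nonlinear) skb; names here are illustrative, not kernel API:

#include <string.h>
#include <errno.h>
#include <stddef.h>

struct flat_skb {
	const unsigned char *data;
	size_t len;
};

/* Model of the skb_copy_bits() contract the slow path relies on: copy
 * 'len' bytes starting at 'offset' into 'to', 0 on success, -EFAULT if
 * the request runs past the available data. */
static int skb_copy_bits_model(const struct flat_skb *skb, int offset,
			       void *to, int len)
{
	if (offset < 0 || len < 0 || (size_t)offset + (size_t)len > skb->len)
		return -EFAULT;  /* generated code branches to bpf_error */
	memcpy(to, skb->data + offset, len);
	return 0;
}

In the real slow path the destination is the scratch area at r1+BPF_PPC_STACK_BASIC, and the value is then picked back up into r_A or r_X, as the context lines of the final hunk show.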
@@ -136,3 +137,84 @@ bpf_slow_path_byte_msh:
 	lbz	r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
 	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
 	blr
+
+/* Call out to bpf_internal_load_pointer_neg_helper:
+ * We'll need to back up our volatile regs first; we have
+ * local variable space at r1+(BPF_PPC_STACK_BASIC).
+ * Allocate a new stack frame here to remain ABI-compliant in
+ * stashing LR.
+ */
+#define sk_negative_common(SIZE)				\
+	mflr	r0;						\
+	std	r0, 16(r1);					\
+	/* R3 goes in parameter space of caller's frame */	\
+	std	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	std	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
+	std	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	stdu	r1, -BPF_PPC_SLOWPATH_FRAME(r1);		\
+	/* R3 = r_skb, as passed */				\
+	mr	r4, r_addr;					\
+	li	r5, SIZE;					\
+	bl	bpf_internal_load_pointer_neg_helper;		\
+	/* R3 != 0 on success */				\
+	addi	r1, r1, BPF_PPC_SLOWPATH_FRAME;			\
+	ld	r0, 16(r1);					\
+	ld	r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1);		\
+	ld	r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1);		\
+	mtlr	r0;						\
+	cmpldi	r3, 0;						\
+	beq	bpf_error_slow;	/* cr0 = EQ */			\
+	mr	r_addr, r3;					\
+	ld	r_skb, (BPF_PPC_STACKFRAME+48)(r1);		\
+	/* Great success! */
+
+bpf_slow_path_word_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_word_negative_offset
+sk_load_word_negative_offset:
+	sk_negative_common(4)
+	lwz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_half_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_half_negative_offset
+sk_load_half_negative_offset:
+	sk_negative_common(2)
+	lhz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_byte_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_byte_negative_offset
+sk_load_byte_negative_offset:
+	sk_negative_common(1)
+	lbz	r_A, 0(r_addr)
+	blr
+
+bpf_slow_path_byte_msh_neg:
+	lis	r_scratch1,-32	/* SKF_LL_OFF */
+	cmpd	r_addr, r_scratch1	/* addr < SKF_* */
+	blt	bpf_error	/* cr0 = LT */
+	.globl	sk_load_byte_msh_negative_offset
+sk_load_byte_msh_negative_offset:
+	sk_negative_common(1)
+	lbz	r_X, 0(r_addr)
+	rlwinm	r_X, r_X, 2, 32-4-2, 31-2
+	blr
+
+bpf_error_slow:
+	/* fabricate a cr0 = lt */
+	li	r_scratch1, -1
+	cmpdi	r_scratch1, 0
+bpf_error:
+	/* Entered with cr0 = lt */
+	li	r3, 0
+	/* Generated code will 'blt epilogue', returning 0. */
+	blr
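Two details of the negative-offset machinery are worth spelling out. First, lis r_scratch1, -32 materializes -32 << 16 = -0x200000, which is SKF_LL_OFF: any offset below that cannot fall in one of the special negative windows, so the code branches straight to bpf_error with cr0 already LT. Second, bpf_internal_load_pointer_neg_helper() is the C helper in net/core/filter.c that resolves SKF_NET_OFF/SKF_LL_OFF relative offsets to a pointer and returns NULL on failure, which is why the macro tests r3 against zero and why bpf_error_slow fabricates cr0 = LT for the generated code's 'blt epilogue'. A minimal userspace model of the helper's semantics; the struct and field names are illustrative stand-ins (the real helper walks struct sk_buff headers):

#include <stddef.h>

#define SKF_NET_OFF	(-0x100000)	/* from <linux/filter.h> */
#define SKF_LL_OFF	(-0x200000)

/* Stand-in for the skb: a flat buffer plus precomputed header offsets. */
struct neg_skb_model {
	const unsigned char *buf;   /* start of packet bytes */
	size_t len;                 /* total bytes available */
	size_t mac_off;             /* link-layer header offset in buf */
	size_t net_off;             /* network header offset in buf */
};

/* Model of bpf_internal_load_pointer_neg_helper(): resolve a negative
 * BPF offset 'k' to a pointer to 'size' readable bytes, or NULL. */
static const void *load_pointer_neg_model(const struct neg_skb_model *skb,
					  int k, unsigned int size)
{
	size_t base, off;

	if (k >= SKF_NET_OFF) {            /* SKF_NET_OFF <= k < 0 */
		base = skb->net_off;
		off  = (size_t)(k - SKF_NET_OFF);
	} else if (k >= SKF_LL_OFF) {      /* SKF_LL_OFF <= k < SKF_NET_OFF */
		base = skb->mac_off;
		off  = (size_t)(k - SKF_LL_OFF);
	} else {
		return NULL;  /* the asm already filtered this via lis/cmpd */
	}
	if (base + off + size > skb->len)
		return NULL;  /* -> r3 == 0 -> beq bpf_error_slow */
	return skb->buf + base + off;
}

On success the asm moves the returned pointer into r_addr and loads through it at displacement 0, which is why each *_negative_offset body is just sk_negative_common(SIZE) followed by a single lwz/lhz/lbz.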