author:    Markos Chandras <markos.chandras@imgtec.com>  2015-06-04 06:56:16 -0400
committer: Ralf Baechle <ralf@linux-mips.org>  2015-06-21 15:54:25 -0400
commit:    266a88e2200eefa216180ce2761eb84e06f3d77e (patch)
tree:      ba408f5efb9382895717f9f99fa456f0732c788f /arch/mips/net/bpf_jit_asm.S
parent:    beaf70b8b7d025e7293ac013b198fc550ee2d3ec (diff)
MIPS: BPF: Introduce BPF ASM helpers
This commit introduces BPF ASM helpers for MIPS and MIPS64 kernels. The
purpose of this patch is twofold:

1) We are now able to handle negative offsets instead of either falling
   back to the interpreter or simply bailing out without doing anything.

2) We optimize reads from the packet header instead of calling the C
   helpers.

Because of this patch, we are also able to get rid of quite a bit of code
in the JIT generation process by using MIPS optimized assembly code. The
new assembly code makes the test_bpf testsuite happy, with all 60 tests
passing, compared to the previous implementation where 2 tests were
failing.

Some basic analysis of the results between the old implementation and the
new one, running current mainline on an ER8 board (deltas within +/- 30us
are ignored to filter out noise from kernel scheduling or IRQ latencies),
gives the following summary: 22 tests are faster, 7 are slower and 47 saw
no improvement, with the most notable improvement being the tcpdump tests.
The 7 slower tests all follow the slow path
(bpf_internal_load_pointer_neg_helper), which is expected to be slow, so
that is not a problem.

Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
Cc: netdev@vger.kernel.org
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Alexei Starovoitov <ast@plumgrid.com>
Cc: Daniel Borkmann <dborkman@redhat.com>
Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
Cc: linux-kernel@vger.kernel.org
Cc: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/10530/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
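As context for (1): in classic BPF, a negative offset is not a packet data
offset but a selector into a virtual address space, with SKF_LL_OFF-based
offsets reading from the link-layer header and SKF_NET_OFF-based offsets
from the network header. A minimal user-space sketch that would exercise
the new negative-offset path (the attach_ll_filter wrapper is hypothetical;
BPF_STMT, SKF_LL_OFF, struct sock_filter/sock_fprog and SO_ATTACH_FILTER
come from linux/filter.h):

	#include <linux/filter.h>
	#include <sys/socket.h>

	/* Attach a classic BPF filter whose load uses a negative,
	 * SKF_LL_OFF-relative offset, so the JIT-compiled program goes
	 * through sk_load_byte_negative below.
	 */
	static int attach_ll_filter(int sock)
	{
		struct sock_filter insns[] = {
			/* A = byte 0 of the link-layer header (negative offset) */
			BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 0),
			/* Accept the packet */
			BPF_STMT(BPF_RET | BPF_K, 0xffff),
		};
		struct sock_fprog prog = {
			.len	= sizeof(insns) / sizeof(insns[0]),
			.filter	= insns,
		};

		return setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER,
				  &prog, sizeof(prog));
	}

With the previous MIPS JIT such a program either fell back to the
interpreter or bailed out; with these helpers it stays JIT-compiled.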
Diffstat (limited to 'arch/mips/net/bpf_jit_asm.S')
-rw-r--r--  arch/mips/net/bpf_jit_asm.S  238
1 file changed, 238 insertions, 0 deletions
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
new file mode 100644
index 000000000000..e92726099be0
--- /dev/null
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -0,0 +1,238 @@
/*
 * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
 * compiler.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <asm/asm.h>
#include <asm/regdef.h>
#include "bpf_jit.h"

/* ABI
 *
 * r_skb_hl	skb header length
 * r_skb_data	skb data
 * r_off(a1)	offset register
 * r_A		BPF register A
 * r_X		BPF register X
 * r_skb(a0)	*skb
 * r_M		*scratch memory
 * r_skb_len	skb length
 * r_s0		Scratch register 0
 * r_s1		Scratch register 1
 *
 * On entry:
 * a0: *skb
 * a1: offset (imm or imm + X)
 *
 * All non-BPF-ABI registers are free for use. On return, we only
 * care about r_ret. The BPF-ABI registers are assumed to remain
 * unmodified during the entire filter operation.
 */
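
/*
 * To make the contract concrete, this is roughly the call sequence the
 * JIT-generated code is expected to emit around these helpers (an
 * illustrative sketch, not copied from bpf_jit.c):
 *
 *	move	a1, <offset>		# imm, or imm + X for indirect loads
 *	PTR_LA	t0, sk_load_word
 *	jalr	t0
 *	 nop
 *	bnez	$r_ret, <filter_abort>	# non-zero r_ret signals a fault
 *	...				# on success the value is in $r_A
 */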

#define skb		a0
#define offset		a1
#define SKF_LL_OFF	(-0x200000) /* Can't include linux/filter.h in assembly */

	/* We know better :) so prevent assembler reordering etc */
	.set	noreorder

#define is_offset_negative(TYPE)				\
	/* If offset is negative we have more work to do */	\
	slti	t0, offset, 0;					\
	bgtz	t0, bpf_slow_path_##TYPE##_neg;			\
	/* Be careful what follows in DS. */

#define is_offset_in_header(SIZE, TYPE)				\
	/* Reading from header? */				\
	addiu	$r_s0, $r_skb_hl, -SIZE;			\
	slt	t0, $r_s0, offset;				\
	bgtz	t0, bpf_slow_path_##TYPE;			\

LEAF(sk_load_word)
	is_offset_negative(word)
	.globl sk_load_word_positive
sk_load_word_positive:
	is_offset_in_header(4, word)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lw	$r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	/* wsbh swaps the bytes within each halfword; rotating by 16
	 * then completes the full 32-bit byte swap, leaving $r_A in
	 * network (big-endian) byte order.
	 */
	wsbh	t0, $r_A
	rotr	$r_A, t0, 16
#endif
	jr	$r_ra
	move	$r_ret, zero
	END(sk_load_word)

LEAF(sk_load_half)
	is_offset_negative(half)
	.globl sk_load_half_positive
sk_load_half_positive:
	is_offset_in_header(2, half)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lh	$r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	/* Swap the two bytes and sign-extend the 16-bit result */
	wsbh	t0, $r_A
	seh	$r_A, t0
#endif
	jr	$r_ra
	move	$r_ret, zero
	END(sk_load_half)

LEAF(sk_load_byte)
	is_offset_negative(byte)
	.globl sk_load_byte_positive
sk_load_byte_positive:
	is_offset_in_header(1, byte)
	/* Offset within header boundaries */
	PTR_ADDU t1, $r_skb_data, offset
	lb	$r_A, 0(t1)
	jr	$r_ra
	move	$r_ret, zero
	END(sk_load_byte)

/*
 * call skb_copy_bits:
 * (prototype in linux/skbuff.h)
 *
 * int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 *
 * o32 mandates we leave 4 stack slots for the argument registers in case
 * the callee needs to use them. Even though we don't care about the
 * argument registers ourselves, we need to allocate that space to remain
 * ABI compliant since the callee may want to use it.
 * We also allocate 2 more slots, for $r_ra and our destination buffer (*to).
 *
 * n64 is a bit different: the *caller* preserves its own argument
 * registers if it still needs them, so no argument save area is mandated.
 * In 64-bit kernels we therefore allocate the 4-arg space for no good
 * reason, but it does not matter that much really.
 *
 * (void *to) is returned in r_s0
 *
 */
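
/*
 * For reference, the slow path below is roughly equivalent to the
 * following C (a sketch under the assumptions above; 'fault' maps to
 * the fault label at the end of this file):
 *
 *	u32 tmp;
 *
 *	if (!(offset < skb->len - SIZE))
 *		goto fault;
 *	if (skb_copy_bits(skb, offset, &tmp, SIZE) < 0)
 *		goto fault;
 *	A = ntohl(tmp);		// word case; half/byte differ only in width
 *	ret = 0;
 */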
#define bpf_slow_path_common(SIZE)				\
	/* Quick check. Are we within reasonable boundaries? */ \
	LONG_ADDIU	$r_s1, $r_skb_len, -SIZE;	\
	sltu		$r_s0, offset, $r_s1;		\
	beqz		$r_s0, fault;			\
	/* Load 4th argument in DS */			\
	LONG_ADDIU	a3, zero, SIZE;			\
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);	\
	PTR_LA		t0, skb_copy_bits;		\
	PTR_S		$r_ra, (5 * SZREG)($r_sp);	\
	/* Assign low slot to a2 */			\
	move		a2, $r_sp;			\
	jalr		t0;				\
	/* Reset our destination slot (DS but it's ok) */ \
	INT_S		zero, (4 * SZREG)($r_sp);	\
	/*						\
	 * skb_copy_bits returns 0 on success and -EFAULT \
	 * on error. Our data live in a2. Do not bother with \
	 * our data if an error has been returned.	\
	 */						\
	/* Restore our frame */				\
	PTR_L		$r_ra, (5 * SZREG)($r_sp);	\
	INT_L		$r_s0, (4 * SZREG)($r_sp);	\
	bltz		v0, fault;			\
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;	\
	move		$r_ret, zero;			\

NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
	bpf_slow_path_common(4)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	wsbh	t0, $r_s0
	jr	$r_ra
	rotr	$r_A, t0, 16
#endif
	jr	$r_ra
	move	$r_A, $r_s0

	END(bpf_slow_path_word)

NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
	bpf_slow_path_common(2)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	jr	$r_ra
	wsbh	$r_A, $r_s0
#endif
	jr	$r_ra
	move	$r_A, $r_s0

	END(bpf_slow_path_half)

NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
	bpf_slow_path_common(1)
	jr	$r_ra
	move	$r_A, $r_s0

	END(bpf_slow_path_byte)

/*
 * Negative entry points
 */
	.macro bpf_is_end_of_data
	li	t0, SKF_LL_OFF
	/* Reading link layer data? */
	slt	t1, offset, t0
	bgtz	t1, fault
	/* Be careful what follows in DS. */
	.endm
/*
 * call bpf_internal_load_pointer_neg_helper:
 * (prototype in linux/filter.h)
 *
 * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
 *					      int k, unsigned int size)
 *
 * see above (bpf_slow_path_common) for ABI restrictions
 */
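
/*
 * For reference: the helper (net/core/filter.c) maps the negative offset
 * to either the link-layer header (SKF_LL_OFF based) or the network
 * header (SKF_NET_OFF based) and returns NULL if the request cannot be
 * satisfied, so each block below is roughly (a sketch):
 *
 *	void *p = bpf_internal_load_pointer_neg_helper(skb, offset, SIZE);
 *
 *	if (p == NULL)
 *		goto fault;
 *	A = *p;			// lbu/lhu/lw from the returned pointer
 *	ret = 0;
 */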
#define bpf_negative_common(SIZE)				\
	PTR_ADDIU	$r_sp, $r_sp, -(6 * SZREG);	\
	PTR_LA		t0, bpf_internal_load_pointer_neg_helper; \
	PTR_S		$r_ra, (5 * SZREG)($r_sp);	\
	jalr		t0;				\
	li		a2, SIZE;			\
	PTR_L		$r_ra, (5 * SZREG)($r_sp);	\
	/* Check return pointer */			\
	beqz		v0, fault;			\
	PTR_ADDIU	$r_sp, $r_sp, 6 * SZREG;	\
	/* Preserve our pointer */			\
	move		$r_s0, v0;			\
	/* Set return value */				\
	move		$r_ret, zero;			\

bpf_slow_path_word_neg:
	bpf_is_end_of_data
NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(4)
	jr	$r_ra
	lw	$r_A, 0($r_s0)
	END(sk_load_word_negative)

bpf_slow_path_half_neg:
	bpf_is_end_of_data
NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(2)
	jr	$r_ra
	lhu	$r_A, 0($r_s0)
	END(sk_load_half_negative)

bpf_slow_path_byte_neg:
	bpf_is_end_of_data
NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
	bpf_negative_common(1)
	jr	$r_ra
	lbu	$r_A, 0($r_s0)
	END(sk_load_byte_negative)

fault:
	jr	$r_ra
	addiu	$r_ret, zero, 1