path: root/net
author    David S. Miller <davem@davemloft.net>    2014-07-24 00:06:30 -0400
committer David S. Miller <davem@davemloft.net>    2014-07-24 00:06:30 -0400
commit    f6e675324481c56b358091ddb446b2c95a8e047b (patch)
tree      024d48e55bb053d7013e0efc1b7a50f75f3b5a36 /net
parent    8942a6d5f2b60ca374473ee0a996c5e64ce604dc (diff)
parent    b5f4df3483a1679bc11eb2dd7dcf4f3c8b54d387 (diff)
Merge branch 'filter-move'
Alexei Starovoitov says:

====================
I believe my recent set of RFC/patches [1] provided good visibility on
where I would like to take the eBPF subsystem. These two trivial patches
are a first step in that direction:

patch 1 - mechanical split of the eBPF interpreter out of filter.c
patch 2 - nominate myself as a maintainer for the eBPF core pieces

In the foreseeable future eBPF patches will go through net-next, so
netdev is set as the primary mailing list.

[1] git://git.kernel.org/pub/scm/linux/kernel/git/ast/bpf master
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--	net/core/filter.c	511
1 file changed, 0 insertions(+), 511 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index b90ae7fb3b89..1d0e9492e4fa 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -45,45 +45,6 @@
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
 
-/* Registers */
-#define BPF_R0	regs[BPF_REG_0]
-#define BPF_R1	regs[BPF_REG_1]
-#define BPF_R2	regs[BPF_REG_2]
-#define BPF_R3	regs[BPF_REG_3]
-#define BPF_R4	regs[BPF_REG_4]
-#define BPF_R5	regs[BPF_REG_5]
-#define BPF_R6	regs[BPF_REG_6]
-#define BPF_R7	regs[BPF_REG_7]
-#define BPF_R8	regs[BPF_REG_8]
-#define BPF_R9	regs[BPF_REG_9]
-#define BPF_R10	regs[BPF_REG_10]
-
-/* Named registers */
-#define DST	regs[insn->dst_reg]
-#define SRC	regs[insn->src_reg]
-#define FP	regs[BPF_REG_FP]
-#define ARG1	regs[BPF_REG_ARG1]
-#define CTX	regs[BPF_REG_CTX]
-#define IMM	insn->imm
-
-/* No hurry in this branch
- *
- * Exported for the bpf jit load helper.
- */
-void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
-{
-	u8 *ptr = NULL;
-
-	if (k >= SKF_NET_OFF)
-		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
-	else if (k >= SKF_LL_OFF)
-		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
-	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
-		return ptr;
-
-	return NULL;
-}
-
 /**
  * sk_filter - run a packet through a socket filter
  * @sk: sock associated with &sk_buff
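
The first hunk removes bpf_internal_load_pointer_neg_helper(), which resolves classic BPF's negative "magic" offsets (SKF_NET_OFF, SKF_LL_OFF) to pointers relative to the network or link-layer header, with a bounds check against the skb. A minimal user-space sketch of that convention follows; it assumes the SKF_* values from include/uapi/linux/filter.h and uses a hypothetical fake_skb in place of struct sk_buff:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SKF_NET_OFF	(-0x100000)	/* assumed, mirroring linux/filter.h */
#define SKF_LL_OFF	(-0x200000)

struct fake_skb {			/* hypothetical stand-in for struct sk_buff */
	uint8_t *head, *mac, *net, *tail;
};

static void *load_neg(const struct fake_skb *skb, int k, unsigned int size)
{
	uint8_t *ptr = NULL;

	if (k >= SKF_NET_OFF)		/* offset relative to network header */
		ptr = skb->net + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)	/* offset relative to link-layer header */
		ptr = skb->mac + k - SKF_LL_OFF;
	if (ptr >= skb->head && ptr + size <= skb->tail)
		return ptr;		/* in bounds: hand back the pointer */
	return NULL;			/* out of bounds: caller aborts the load */
}

int main(void)
{
	uint8_t buf[64] = { 0 };
	struct fake_skb skb = { buf, buf, buf + 14, buf + sizeof(buf) };

	/* Read 4 bytes starting at the network header (k == SKF_NET_OFF). */
	printf("%p\n", load_neg(&skb, SKF_NET_OFF, 4));
	return 0;
}

Per the cover letter this is a mechanical move, so the helper's logic is unchanged at its new home; only the file boundary moves.
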
@@ -126,451 +87,6 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sk_filter);
 
-/* Base function for offset calculation. Needs to go into .text section,
- * therefore keeping it non-static as well; will also be used by JITs
- * anyway later on, so do not let the compiler omit it.
- */
-noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
-{
-	return 0;
-}
-
-/**
- *	__sk_run_filter - run a filter on a given context
- *	@ctx: buffer to run the filter on
- *	@insn: filter to apply
- *
- * Decode and apply filter instructions to the skb->data. Return length to
- * keep, 0 for none. @ctx is the data we are operating on, @insn is the
- * array of filter instructions.
- */
-static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
-{
-	u64 stack[MAX_BPF_STACK / sizeof(u64)];
-	u64 regs[MAX_BPF_REG], tmp;
-	static const void *jumptable[256] = {
-		[0 ... 255] = &&default_label,
-		/* Now overwrite non-defaults ... */
-		/* 32 bit ALU operations */
-		[BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
-		[BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
-		[BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
-		[BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
-		[BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
-		[BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
-		[BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X,
-		[BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K,
-		[BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
-		[BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
-		[BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
-		[BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
-		[BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
-		[BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
-		[BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
-		[BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
-		[BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
-		[BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
-		[BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
-		[BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
-		[BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
-		[BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
-		[BPF_ALU | BPF_NEG] = &&ALU_NEG,
-		[BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
-		[BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
-		/* 64 bit ALU operations */
-		[BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
-		[BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
-		[BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
-		[BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
-		[BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
-		[BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
-		[BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
-		[BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
-		[BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
-		[BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
-		[BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
-		[BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
-		[BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
-		[BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
-		[BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
-		[BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
-		[BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
-		[BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
-		[BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
-		[BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
-		[BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
-		[BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
-		[BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
-		[BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
-		[BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
-		/* Call instruction */
-		[BPF_JMP | BPF_CALL] = &&JMP_CALL,
-		/* Jumps */
-		[BPF_JMP | BPF_JA] = &&JMP_JA,
-		[BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
-		[BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
-		[BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
-		[BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
-		[BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
-		[BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
-		[BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
-		[BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
-		[BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
-		[BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
-		[BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
-		[BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
-		[BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
-		[BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
-		/* Program return */
-		[BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
-		/* Store instructions */
-		[BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
-		[BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
-		[BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
-		[BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
-		[BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
-		[BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
-		[BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
-		[BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
-		[BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
-		[BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
-		/* Load instructions */
-		[BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
-		[BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
-		[BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
-		[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
-		[BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
-		[BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
-		[BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
-		[BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
-		[BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
-		[BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
-	};
-	void *ptr;
-	int off;
-
-#define CONT	 ({ insn++; goto select_insn; })
-#define CONT_JMP ({ insn++; goto select_insn; })
-
-	FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
-	ARG1 = (u64) (unsigned long) ctx;
-
-	/* Registers used in classic BPF programs need to be reset first. */
-	regs[BPF_REG_A] = 0;
-	regs[BPF_REG_X] = 0;
-
-select_insn:
-	goto *jumptable[insn->code];
-
-	/* ALU */
-#define ALU(OPCODE, OP)			\
-	ALU64_##OPCODE##_X:		\
-		DST = DST OP SRC;	\
-		CONT;			\
-	ALU_##OPCODE##_X:		\
-		DST = (u32) DST OP (u32) SRC;	\
-		CONT;			\
-	ALU64_##OPCODE##_K:		\
-		DST = DST OP IMM;	\
-		CONT;			\
-	ALU_##OPCODE##_K:		\
-		DST = (u32) DST OP (u32) IMM;	\
-		CONT;
-
-	ALU(ADD, +)
-	ALU(SUB, -)
-	ALU(AND, &)
-	ALU(OR, |)
-	ALU(LSH, <<)
-	ALU(RSH, >>)
-	ALU(XOR, ^)
-	ALU(MUL, *)
-#undef ALU
-	ALU_NEG:
-		DST = (u32) -DST;
-		CONT;
-	ALU64_NEG:
-		DST = -DST;
-		CONT;
-	ALU_MOV_X:
-		DST = (u32) SRC;
-		CONT;
-	ALU_MOV_K:
-		DST = (u32) IMM;
-		CONT;
-	ALU64_MOV_X:
-		DST = SRC;
-		CONT;
-	ALU64_MOV_K:
-		DST = IMM;
-		CONT;
-	ALU64_ARSH_X:
-		(*(s64 *) &DST) >>= SRC;
-		CONT;
-	ALU64_ARSH_K:
-		(*(s64 *) &DST) >>= IMM;
-		CONT;
-	ALU64_MOD_X:
-		if (unlikely(SRC == 0))
-			return 0;
-		tmp = DST;
-		DST = do_div(tmp, SRC);
-		CONT;
-	ALU_MOD_X:
-		if (unlikely(SRC == 0))
-			return 0;
-		tmp = (u32) DST;
-		DST = do_div(tmp, (u32) SRC);
-		CONT;
-	ALU64_MOD_K:
-		tmp = DST;
-		DST = do_div(tmp, IMM);
-		CONT;
-	ALU_MOD_K:
-		tmp = (u32) DST;
-		DST = do_div(tmp, (u32) IMM);
-		CONT;
-	ALU64_DIV_X:
-		if (unlikely(SRC == 0))
-			return 0;
-		do_div(DST, SRC);
-		CONT;
-	ALU_DIV_X:
-		if (unlikely(SRC == 0))
-			return 0;
-		tmp = (u32) DST;
-		do_div(tmp, (u32) SRC);
-		DST = (u32) tmp;
-		CONT;
-	ALU64_DIV_K:
-		do_div(DST, IMM);
-		CONT;
-	ALU_DIV_K:
-		tmp = (u32) DST;
-		do_div(tmp, (u32) IMM);
-		DST = (u32) tmp;
-		CONT;
-	ALU_END_TO_BE:
-		switch (IMM) {
-		case 16:
-			DST = (__force u16) cpu_to_be16(DST);
-			break;
-		case 32:
-			DST = (__force u32) cpu_to_be32(DST);
-			break;
-		case 64:
-			DST = (__force u64) cpu_to_be64(DST);
-			break;
-		}
-		CONT;
-	ALU_END_TO_LE:
-		switch (IMM) {
-		case 16:
-			DST = (__force u16) cpu_to_le16(DST);
-			break;
-		case 32:
-			DST = (__force u32) cpu_to_le32(DST);
-			break;
-		case 64:
-			DST = (__force u64) cpu_to_le64(DST);
-			break;
-		}
-		CONT;
-
-	/* CALL */
-	JMP_CALL:
-		/* Function call scratches BPF_R1-BPF_R5 registers,
-		 * preserves BPF_R6-BPF_R9, and stores return value
-		 * into BPF_R0.
-		 */
-		BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
-						       BPF_R4, BPF_R5);
-		CONT;
-
-	/* JMP */
-	JMP_JA:
-		insn += insn->off;
-		CONT;
-	JMP_JEQ_X:
-		if (DST == SRC) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JEQ_K:
-		if (DST == IMM) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JNE_X:
-		if (DST != SRC) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JNE_K:
-		if (DST != IMM) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JGT_X:
-		if (DST > SRC) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JGT_K:
-		if (DST > IMM) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JGE_X:
-		if (DST >= SRC) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JGE_K:
-		if (DST >= IMM) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JSGT_X:
-		if (((s64) DST) > ((s64) SRC)) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JSGT_K:
-		if (((s64) DST) > ((s64) IMM)) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JSGE_X:
-		if (((s64) DST) >= ((s64) SRC)) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JSGE_K:
-		if (((s64) DST) >= ((s64) IMM)) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JSET_X:
-		if (DST & SRC) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_JSET_K:
-		if (DST & IMM) {
-			insn += insn->off;
-			CONT_JMP;
-		}
-		CONT;
-	JMP_EXIT:
-		return BPF_R0;
-
-	/* STX and ST and LDX*/
-#define LDST(SIZEOP, SIZE)						\
-	STX_MEM_##SIZEOP:						\
-		*(SIZE *)(unsigned long) (DST + insn->off) = SRC;	\
-		CONT;							\
-	ST_MEM_##SIZEOP:						\
-		*(SIZE *)(unsigned long) (DST + insn->off) = IMM;	\
-		CONT;							\
-	LDX_MEM_##SIZEOP:						\
-		DST = *(SIZE *)(unsigned long) (SRC + insn->off);	\
-		CONT;
-
-	LDST(B, u8)
-	LDST(H, u16)
-	LDST(W, u32)
-	LDST(DW, u64)
-#undef LDST
-	STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
-		atomic_add((u32) SRC, (atomic_t *)(unsigned long)
-			   (DST + insn->off));
-		CONT;
-	STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
-		atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
-			     (DST + insn->off));
-		CONT;
-	LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
-		off = IMM;
-load_word:
-		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
-		 * only appearing in the programs where ctx ==
-		 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
-		 * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
-		 * internal BPF verifier will check that BPF_R6 ==
-		 * ctx.
-		 *
-		 * BPF_ABS and BPF_IND are wrappers of function calls,
-		 * so they scratch BPF_R1-BPF_R5 registers, preserve
-		 * BPF_R6-BPF_R9, and store return value into BPF_R0.
-		 *
-		 * Implicit input:
-		 *   ctx == skb == BPF_R6 == CTX
-		 *
-		 * Explicit input:
-		 *   SRC == any register
-		 *   IMM == 32-bit immediate
-		 *
-		 * Output:
-		 *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
-		 */
-
-		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
-		if (likely(ptr != NULL)) {
-			BPF_R0 = get_unaligned_be32(ptr);
-			CONT;
-		}
-
-		return 0;
-	LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
-		off = IMM;
-load_half:
-		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
-		if (likely(ptr != NULL)) {
-			BPF_R0 = get_unaligned_be16(ptr);
-			CONT;
-		}
-
-		return 0;
-	LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
-		off = IMM;
-load_byte:
-		ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
-		if (likely(ptr != NULL)) {
-			BPF_R0 = *(u8 *)ptr;
-			CONT;
-		}
-
-		return 0;
-	LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
-		off = IMM + SRC;
-		goto load_word;
-	LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
-		off = IMM + SRC;
-		goto load_half;
-	LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
-		off = IMM + SRC;
-		goto load_byte;
-
-	default_label:
-		/* If we ever reach this, we have a bug somewhere. */
-		WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
-		return 0;
-}
-
 /* Helper to find the offset of pkt_type in sk_buff structure. We want
  * to make sure its still a 3bit field starting at a byte boundary;
  * taken from arch/x86/net/bpf_jit_comp.c.
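
The interpreter removed in the hunk above is the heart of the move. Instead of a switch in a loop, it dispatches with GCC's labels-as-values extension ("computed goto"): the 256-entry jumptable defaults every slot to default_label, so no bounds check is needed and each instruction costs a single indirect jump. A stripped-down sketch of that dispatch pattern, using a made-up two-opcode instruction set and assuming a GCC/clang toolchain:

#include <stdint.h>
#include <stdio.h>

enum { OP_ADD, OP_EXIT };		/* hypothetical two-opcode ISA */

struct insn {
	uint8_t code;
	int32_t imm;
};

static uint64_t run(const struct insn *insn)
{
	/* Label addresses stored in a table, as the kernel does; a real
	 * table would default every slot, like [0 ... 255] above. */
	static const void *jumptable[] = {
		[OP_ADD]  = &&ADD,
		[OP_EXIT] = &&EXIT,
	};
	uint64_t r0 = 0;

#define CONT ({ insn++; goto select_insn; })

select_insn:
	goto *jumptable[insn->code];	/* one indirect jump per insn */
ADD:
	r0 += insn->imm;
	CONT;
EXIT:
	return r0;
#undef CONT
}

int main(void)
{
	const struct insn prog[] = {
		{ OP_ADD, 40 }, { OP_ADD, 2 }, { OP_EXIT, 0 },
	};

	printf("%llu\n", (unsigned long long)run(prog));	/* prints 42 */
	return 0;
}

Compared with a shared switch dispatch, each opcode's handler ends in its own indirect branch, which tends to predict better on instruction-heavy programs.
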
@@ -1455,33 +971,6 @@ out_err:
 	return ERR_PTR(err);
 }
 
-void __weak bpf_int_jit_compile(struct sk_filter *prog)
-{
-}
-
-/**
- * sk_filter_select_runtime - select execution runtime for BPF program
- * @fp: sk_filter populated with internal BPF program
- *
- * try to JIT internal BPF program, if JIT is not available select interpreter
- * BPF program will be executed via SK_RUN_FILTER() macro
- */
-void sk_filter_select_runtime(struct sk_filter *fp)
-{
-	fp->bpf_func = (void *) __sk_run_filter;
-
-	/* Probe if internal BPF can be JITed */
-	bpf_int_jit_compile(fp);
-}
-EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
-
-/* free internal BPF program */
-void sk_filter_free(struct sk_filter *fp)
-{
-	bpf_jit_free(fp);
-}
-EXPORT_SYMBOL_GPL(sk_filter_free);
-
 static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 					     struct sock *sk)
 {
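
The last hunk shows the runtime-selection half of the split: sk_filter_select_runtime() installs __sk_run_filter() as the default bpf_func, then calls bpf_int_jit_compile(), a __weak no-op that an architecture's eBPF JIT can override at link time to swap in generated code. A simplified user-space sketch of that weak-hook pattern; the names mirror the deleted code, but the types are stand-ins, not the kernel's:

#include <stdio.h>

struct prog {
	unsigned int (*bpf_func)(const void *ctx);
};

/* Portable fallback: the interpreter (stubbed out here). */
static unsigned int run_interpreter(const void *ctx)
{
	(void)ctx;
	return 0;
}

/* Weak default does nothing; an arch JIT links a strong definition
 * that replaces prog->bpf_func with a pointer to generated code. */
__attribute__((weak)) void jit_compile(struct prog *p)
{
	(void)p;
}

static void select_runtime(struct prog *p)
{
	p->bpf_func = run_interpreter;	/* always-correct default */
	jit_compile(p);			/* may upgrade to JITed code */
}

int main(void)
{
	struct prog p;

	select_runtime(&p);
	printf("ret = %u\n", p.bpf_func(NULL));
	return 0;
}

The design keeps callers oblivious to whether they got the interpreter or a JIT: they always invoke the program through the same bpf_func pointer.
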