aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/bpf
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@plumgrid.com>2014-07-30 23:34:16 -0400
committerDavid S. Miller <davem@davemloft.net>2014-08-02 18:03:58 -0400
commit7ae457c1e5b45a1b826fad9d62b32191d2bdcfdb (patch)
treedcb1aba57530e6c9426a81758173ca146ffafcaf /kernel/bpf
parent8fb575ca396bc31d9fa99c26336e2432b41d1bfc (diff)
net: filter: split 'struct sk_filter' into socket and bpf parts
clean up names related to socket filtering and bpf in the following way: - everything that deals with sockets keeps 'sk_*' prefix - everything that is pure BPF is changed to 'bpf_*' prefix split 'struct sk_filter' into struct sk_filter { atomic_t refcnt; struct rcu_head rcu; struct bpf_prog *prog; }; and struct bpf_prog { u32 jited:1, len:31; struct sock_fprog_kern *orig_prog; unsigned int (*bpf_func)(const struct sk_buff *skb, const struct bpf_insn *filter); union { struct sock_filter insns[0]; struct bpf_insn insnsi[0]; struct work_struct work; }; }; so that 'struct bpf_prog' can be used independent of sockets and cleans up 'unattached' bpf use cases split SK_RUN_FILTER macro into: SK_RUN_FILTER to be used with 'struct sk_filter *' and BPF_PROG_RUN to be used with 'struct bpf_prog *' __sk_filter_release(struct sk_filter *) gains __bpf_prog_release(struct bpf_prog *) helper function also perform related renames for the functions that work with 'struct bpf_prog *', since they're on the same lines: sk_filter_size -> bpf_prog_size sk_filter_select_runtime -> bpf_prog_select_runtime sk_filter_free -> bpf_prog_free sk_unattached_filter_create -> bpf_prog_create sk_unattached_filter_destroy -> bpf_prog_destroy sk_store_orig_filter -> bpf_prog_store_orig_filter sk_release_orig_filter -> bpf_release_orig_filter __sk_migrate_filter -> bpf_migrate_filter __sk_prepare_filter -> bpf_prepare_filter API for attaching classic BPF to a socket stays the same: sk_attach_filter(prog, struct sock *)/sk_detach_filter(struct sock *) and SK_RUN_FILTER(struct sk_filter *, ctx) to execute a program which is used by sockets, tun, af_packet API for 'unattached' BPF programs becomes: bpf_prog_create(struct bpf_prog **)/bpf_prog_destroy(struct bpf_prog *) and BPF_PROG_RUN(struct bpf_prog *, ctx) to execute a program which is used by isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf Signed-off-by: Alexei Starovoitov <ast@plumgrid.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/bpf')
-rw-r--r--kernel/bpf/core.c30
1 file changed, 14 insertions, 16 deletions
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 188ac5ba3900..7f0dbcbb34af 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -73,15 +73,13 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
73} 73}
74 74
75/** 75/**
76 * __sk_run_filter - run a filter on a given context 76 * __bpf_prog_run - run eBPF program on a given context
77 * @ctx: buffer to run the filter on 77 * @ctx: is the data we are operating on
78 * @insn: filter to apply 78 * @insn: is the array of eBPF instructions
79 * 79 *
80 * Decode and apply filter instructions to the skb->data. Return length to 80 * Decode and execute eBPF instructions.
81 * keep, 0 for none. @ctx is the data we are operating on, @insn is the
82 * array of filter instructions.
83 */ 81 */
84static unsigned int __sk_run_filter(void *ctx, const struct bpf_insn *insn) 82static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
85{ 83{
86 u64 stack[MAX_BPF_STACK / sizeof(u64)]; 84 u64 stack[MAX_BPF_STACK / sizeof(u64)];
87 u64 regs[MAX_BPF_REG], tmp; 85 u64 regs[MAX_BPF_REG], tmp;
@@ -508,29 +506,29 @@ load_byte:
508 return 0; 506 return 0;
509} 507}
510 508
511void __weak bpf_int_jit_compile(struct sk_filter *prog) 509void __weak bpf_int_jit_compile(struct bpf_prog *prog)
512{ 510{
513} 511}
514 512
515/** 513/**
516 * sk_filter_select_runtime - select execution runtime for BPF program 514 * bpf_prog_select_runtime - select execution runtime for BPF program
517 * @fp: sk_filter populated with internal BPF program 515 * @fp: bpf_prog populated with internal BPF program
518 * 516 *
519 * try to JIT internal BPF program, if JIT is not available select interpreter 517 * try to JIT internal BPF program, if JIT is not available select interpreter
520 * BPF program will be executed via SK_RUN_FILTER() macro 518 * BPF program will be executed via BPF_PROG_RUN() macro
521 */ 519 */
522void sk_filter_select_runtime(struct sk_filter *fp) 520void bpf_prog_select_runtime(struct bpf_prog *fp)
523{ 521{
524 fp->bpf_func = (void *) __sk_run_filter; 522 fp->bpf_func = (void *) __bpf_prog_run;
525 523
526 /* Probe if internal BPF can be JITed */ 524 /* Probe if internal BPF can be JITed */
527 bpf_int_jit_compile(fp); 525 bpf_int_jit_compile(fp);
528} 526}
529EXPORT_SYMBOL_GPL(sk_filter_select_runtime); 527EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
530 528
531/* free internal BPF program */ 529/* free internal BPF program */
532void sk_filter_free(struct sk_filter *fp) 530void bpf_prog_free(struct bpf_prog *fp)
533{ 531{
534 bpf_jit_free(fp); 532 bpf_jit_free(fp);
535} 533}
536EXPORT_SYMBOL_GPL(sk_filter_free); 534EXPORT_SYMBOL_GPL(bpf_prog_free);