aboutsummaryrefslogtreecommitdiffstats
path: root/net/core
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@plumgrid.com>2014-05-19 17:56:14 -0400
committerDavid S. Miller <davem@davemloft.net>2014-05-21 17:07:17 -0400
commit5fe821a9dee241fa450703ab7015d970ee0cfb8d (patch)
tree4ada90ac07b074b55ffc40220d8a14fcee3f305a /net/core
parent21ea04fa2d26906a2c8bca40891a238414111f5f (diff)
net: filter: cleanup invocation of internal BPF
Kernel API for classic BPF socket filters is: sk_unattached_filter_create() - validate classic BPF, convert, JIT SK_RUN_FILTER() - run it sk_unattached_filter_destroy() - destroy socket filter Cleanup internal BPF kernel API as following: sk_filter_select_runtime() - final step of internal BPF creation. Try to JIT internal BPF program, if JIT is not available select interpreter SK_RUN_FILTER() - run it sk_filter_free() - free internal BPF program Disallow direct calls to BPF interpreter. Execution of the BPF program should be done with SK_RUN_FILTER() macro. Example of internal BPF create, run, destroy: struct sk_filter *fp; fp = kzalloc(sk_filter_size(prog_len), GFP_KERNEL); memcpy(fp->insni, prog, prog_len * sizeof(fp->insni[0])); fp->len = prog_len; sk_filter_select_runtime(fp); SK_RUN_FILTER(fp, ctx); sk_filter_free(fp); Sockets, seccomp, testsuite, tracing are using different ways to populate sk_filter, so first steps of program creation are not common. Signed-off-by: Alexei Starovoitov <ast@plumgrid.com> Acked-by: Daniel Borkmann <dborkman@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--net/core/filter.c44
1 file changed, 28 insertions, 16 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index 32c5b44c537e..7067cb240d3e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -153,7 +153,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
153 * keep, 0 for none. @ctx is the data we are operating on, @insn is the 153 * keep, 0 for none. @ctx is the data we are operating on, @insn is the
154 * array of filter instructions. 154 * array of filter instructions.
155 */ 155 */
156unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn) 156static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
157{ 157{
158 u64 stack[MAX_BPF_STACK / sizeof(u64)]; 158 u64 stack[MAX_BPF_STACK / sizeof(u64)];
159 u64 regs[MAX_BPF_REG], tmp; 159 u64 regs[MAX_BPF_REG], tmp;
@@ -571,15 +571,6 @@ load_byte:
571 return 0; 571 return 0;
572} 572}
573 573
574u32 sk_run_filter_int_seccomp(const struct seccomp_data *ctx,
575 const struct sock_filter_int *insni)
576 __attribute__ ((alias ("__sk_run_filter")));
577
578u32 sk_run_filter_int_skb(const struct sk_buff *ctx,
579 const struct sock_filter_int *insni)
580 __attribute__ ((alias ("__sk_run_filter")));
581EXPORT_SYMBOL_GPL(sk_run_filter_int_skb);
582
583/* Helper to find the offset of pkt_type in sk_buff structure. We want 574/* Helper to find the offset of pkt_type in sk_buff structure. We want
584 * to make sure its still a 3bit field starting at a byte boundary; 575 * to make sure its still a 3bit field starting at a byte boundary;
585 * taken from arch/x86/net/bpf_jit_comp.c. 576 * taken from arch/x86/net/bpf_jit_comp.c.
@@ -1397,7 +1388,7 @@ static void sk_filter_release_rcu(struct rcu_head *rcu)
1397 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 1388 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
1398 1389
1399 sk_release_orig_filter(fp); 1390 sk_release_orig_filter(fp);
1400 bpf_jit_free(fp); 1391 sk_filter_free(fp);
1401} 1392}
1402 1393
1403/** 1394/**
@@ -1497,7 +1488,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1497 goto out_err_free; 1488 goto out_err_free;
1498 } 1489 }
1499 1490
1500 fp->bpf_func = sk_run_filter_int_skb;
1501 fp->len = new_len; 1491 fp->len = new_len;
1502 1492
1503 /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */ 1493 /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
@@ -1510,6 +1500,8 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
1510 */ 1500 */
1511 goto out_err_free; 1501 goto out_err_free;
1512 1502
1503 sk_filter_select_runtime(fp);
1504
1513 kfree(old_prog); 1505 kfree(old_prog);
1514 return fp; 1506 return fp;
1515 1507
@@ -1528,6 +1520,29 @@ void __weak bpf_int_jit_compile(struct sk_filter *prog)
1528{ 1520{
1529} 1521}
1530 1522
1523/**
1524 * sk_filter_select_runtime - select execution runtime for BPF program
1525 * @fp: sk_filter populated with internal BPF program
1526 *
1527 * try to JIT internal BPF program, if JIT is not available select interpreter
1528 * BPF program will be executed via SK_RUN_FILTER() macro
1529 */
1530void sk_filter_select_runtime(struct sk_filter *fp)
1531{
1532 fp->bpf_func = (void *) __sk_run_filter;
1533
1534 /* Probe if internal BPF can be JITed */
1535 bpf_int_jit_compile(fp);
1536}
1537EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
1538
1539/* free internal BPF program */
1540void sk_filter_free(struct sk_filter *fp)
1541{
1542 bpf_jit_free(fp);
1543}
1544EXPORT_SYMBOL_GPL(sk_filter_free);
1545
1531static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp, 1546static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
1532 struct sock *sk) 1547 struct sock *sk)
1533{ 1548{
@@ -1548,12 +1563,9 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
1548 /* JIT compiler couldn't process this filter, so do the 1563 /* JIT compiler couldn't process this filter, so do the
1549 * internal BPF translation for the optimized interpreter. 1564 * internal BPF translation for the optimized interpreter.
1550 */ 1565 */
1551 if (!fp->jited) { 1566 if (!fp->jited)
1552 fp = __sk_migrate_filter(fp, sk); 1567 fp = __sk_migrate_filter(fp, sk);
1553 1568
1554 /* Probe if internal BPF can be jit-ed */
1555 bpf_int_jit_compile(fp);
1556 }
1557 return fp; 1569 return fp;
1558} 1570}
1559 1571