author     Daniel Borkmann <dborkman@redhat.com>    2014-03-28 13:58:18 -0400
committer  David S. Miller <davem@davemloft.net>    2014-03-31 00:45:08 -0400
commit     f8bbbfc3b97f4c7a6c7c23185e520b22bfc3a21d (patch)
tree       e5d935854be9ec9d0c3024fbe3316cfce9e68349 /arch/x86/net
parent     64c27237a07129758e33f5f824ba5c33b7f57417 (diff)
net: filter: add jited flag to indicate jit compiled filters
This patch adds a 'jited' flag to struct sk_filter to indicate whether a filter is currently JIT compiled. The size of sk_filter does not grow: a filter can only be as large as BPF_MAXINSNS, so the 32-bit 'len' member has spare upper bits that can be reused, which also leaves room in 'len' for further flags should they be needed in the future.

The jited flag also makes it possible to run alternative interpreter functions, since at present the only way to detect a JIT compiled filter is to test that fp->bpf_func does not equal the address of sk_run_filter().

Joint work with Alexei Starovoitov.

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
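For illustration, a minimal sketch of how such a flag can be packed into the same 32-bit word as 'len'. This is a sketch under the assumptions stated in the message above, not the exact upstream definition; members other than 'len', 'jited' and 'bpf_func' are abbreviated or omitted.

/* Sketch only: 'jited' shares the 32-bit word with 'len'.  BPF_MAXINSNS
 * is 4096, so 'len' fits comfortably in 31 bits, leaving the top bit for
 * 'jited' (and spare room for future flags) without growing sk_filter.
 */
struct sk_filter {
        atomic_t        refcnt;
        u32             jited:1,        /* set once bpf_func points at JIT generated code */
                        len:31;         /* number of sock_filter instructions */
        unsigned int    (*bpf_func)(const struct sk_buff *skb,
                                    const struct sock_filter *filter);
        /* remaining members (rcu head, work struct, insns[]) omitted */
};

With such a layout, bpf_jit_free() can simply test fp->jited instead of comparing fp->bpf_func against sk_run_filter(), as the second hunk below does.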
Diffstat (limited to 'arch/x86/net')
-rw-r--r--  arch/x86/net/bpf_jit_comp.c  3
1 file changed, 2 insertions, 1 deletion
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 293c57b74edc..dc017735bb91 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -772,6 +772,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 		bpf_flush_icache(header, image + proglen);
 		set_memory_ro((unsigned long)header, header->pages);
 		fp->bpf_func = (void *)image;
+		fp->jited = 1;
 	}
 out:
 	kfree(addrs);
@@ -791,7 +792,7 @@ static void bpf_jit_free_deferred(struct work_struct *work)
 
 void bpf_jit_free(struct sk_filter *fp)
 {
-	if (fp->bpf_func != sk_run_filter) {
+	if (fp->jited) {
 		INIT_WORK(&fp->work, bpf_jit_free_deferred);
 		schedule_work(&fp->work);
 	} else {