author		David S. Miller <davem@davemloft.net>	2014-08-02 18:04:10 -0400
committer	David S. Miller <davem@davemloft.net>	2014-08-02 18:04:10 -0400
commit		e339756c9995648eecd015391f66baf2fd251fec
tree		dcb1aba57530e6c9426a81758173ca146ffafcaf
parent		4330487acfff0cf1d7b14d238583a182e0a444bb
parent		7ae457c1e5b45a1b826fad9d62b32191d2bdcfdb
Merge branch 'filter-next'
Alexei Starovoitov says:

====================
net: filter: split sk_filter into socket and bpf, cleanup names

The main goal of the series is to split 'struct sk_filter' into socket and
bpf parts and cleanup names in the following way:

- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix

split 'struct sk_filter' into

	struct sk_filter {
		atomic_t	refcnt;
		struct rcu_head	rcu;
		struct bpf_prog	*prog;
	};

and

	struct bpf_prog {
		u32			jited:1,
					len:31;
		struct sock_fprog_kern	*orig_prog;
		unsigned int		(*bpf_func)(const struct sk_buff *skb,
						    const struct bpf_insn *filter);
		union {
			struct sock_filter	insns[0];
			struct bpf_insn		insnsi[0];
			struct work_struct	work;
		};
	};

so that 'struct bpf_prog' can be used independent of sockets and cleans up
'unattached' bpf use cases: isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf,
test_bpf, which don't need refcnt/rcu fields.

It's a follow up to the rcu cleanup started by Pablo in commit
34c5bd66e5 ("net: filter: don't release unattached filter through
call_rcu()")

Patch 1 - cleans up socket memory charging and makes it possible for
functions sk(bpf)_migrate_filter(), sk(bpf)_prepare_filter() to be
socket independent
Patches 2-4 - trivial renames
Patch 5 - sk_filter split and renames of related sk_*() functions
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
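For socket code, the visible effect of the split is one extra indirection: the refcounted sk_filter wrapper stays attached to the socket, and the program body is reached through its ->prog pointer. A minimal sketch of the receive-path invocation after this series (fragment only, given some struct sock *sk and struct sk_buff *skb; the RCU context and full error handling of the real sk_filter() are elided):

	struct sk_filter *fp = rcu_dereference(sk->sk_filter);
	unsigned int pkt_len;

	if (fp)
		/* SK_RUN_FILTER() now expands to
		 * (*fp->prog->bpf_func)(ctx, fp->prog->insnsi)
		 */
		pkt_len = SK_RUN_FILTER(fp, skb);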
 Documentation/networking/filter.txt      |  12
 arch/arm/net/bpf_jit_32.c                |   8
 arch/mips/net/bpf_jit.c                  |   8
 arch/powerpc/net/bpf_jit_comp.c          |   8
 arch/s390/net/bpf_jit_comp.c             |   4
 arch/sparc/net/bpf_jit_comp.c            |   4
 arch/x86/net/bpf_jit_comp.c              |  14
 drivers/isdn/i4l/isdn_ppp.c              |  26
 drivers/net/ppp/ppp_generic.c            |  28
 drivers/net/team/team_mode_loadbalance.c |  14
 include/linux/filter.h                   |  51
 include/linux/isdn_ppp.h                 |   4
 include/uapi/linux/netfilter/xt_bpf.h    |   4
 kernel/bpf/core.c                        |  34
 kernel/seccomp.c                         |  18
 lib/test_bpf.c                           |  24
 net/core/filter.c                        | 183
 net/core/ptp_classifier.c                |   6
 net/core/sock.c                          |   9
 net/core/sock_diag.c                     |   4
 net/netfilter/xt_bpf.c                   |   6
 net/sched/cls_bpf.c                      |  12
 22 files changed, 243 insertions(+), 238 deletions(-)
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index ee78eba78a9d..c48a9704bda8 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -586,12 +586,12 @@ team driver's classifier for its load-balancing mode, netfilter's xt_bpf
 extension, PTP dissector/classifier, and much more. They are all internally
 converted by the kernel into the new instruction set representation and run
 in the eBPF interpreter. For in-kernel handlers, this all works transparently
-by using sk_unattached_filter_create() for setting up the filter, resp.
-sk_unattached_filter_destroy() for destroying it. The macro
-SK_RUN_FILTER(filter, ctx) transparently invokes eBPF interpreter or JITed
-code to run the filter. 'filter' is a pointer to struct sk_filter that we
-got from sk_unattached_filter_create(), and 'ctx' the given context (e.g.
-skb pointer). All constraints and restrictions from sk_chk_filter() apply
+by using bpf_prog_create() for setting up the filter, resp.
+bpf_prog_destroy() for destroying it. The macro
+BPF_PROG_RUN(filter, ctx) transparently invokes eBPF interpreter or JITed
+code to run the filter. 'filter' is a pointer to struct bpf_prog that we
+got from bpf_prog_create(), and 'ctx' the given context (e.g.
+skb pointer). All constraints and restrictions from bpf_check_classic() apply
 before a conversion to the new layout is being done behind the scenes!
 
 Currently, the classic BPF format is being used for JITing on most of the
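Mapped onto code, the documented lifecycle looks roughly as follows (a hedged sketch: the one-instruction accept-all program and the variable names are illustrative, not taken from the patch; assumes some struct sk_buff *skb as context):

	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept whole packet */
	};
	struct sock_fprog_kern fprog = {
		.len	= ARRAY_SIZE(insns),
		.filter	= insns,
	};
	struct bpf_prog *prog;
	unsigned int res;
	int err;

	err = bpf_prog_create(&prog, &fprog);	/* checks classic insns, converts, maybe JITs */
	if (err)
		return err;

	res = BPF_PROG_RUN(prog, skb);		/* interpreter or JITed code */

	bpf_prog_destroy(prog);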
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index fb5503ce016f..a37b989a2f91 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -56,7 +56,7 @@
 #define FLAG_NEED_X_RESET	(1 << 0)
 
 struct jit_ctx {
-	const struct sk_filter *skf;
+	const struct bpf_prog *skf;
 	unsigned idx;
 	unsigned prologue_bytes;
 	int ret0_fp_idx;
@@ -465,7 +465,7 @@ static inline void update_on_xread(struct jit_ctx *ctx)
 static int build_body(struct jit_ctx *ctx)
 {
 	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
-	const struct sk_filter *prog = ctx->skf;
+	const struct bpf_prog *prog = ctx->skf;
 	const struct sock_filter *inst;
 	unsigned i, load_order, off, condt;
 	int imm12;
@@ -857,7 +857,7 @@ b_epilogue:
 }
 
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
 	struct jit_ctx ctx;
 	unsigned tmp_idx;
@@ -926,7 +926,7 @@ out:
 	return;
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
 	if (fp->jited)
 		module_free(NULL, fp->bpf_func);
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index b87390a56a2f..05a56619ece2 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -131,7 +131,7 @@
  * @target: Memory location for the compiled filter
  */
 struct jit_ctx {
-	const struct sk_filter *skf;
+	const struct bpf_prog *skf;
 	unsigned int prologue_bytes;
 	u32 idx;
 	u32 flags;
@@ -789,7 +789,7 @@ static int pkt_type_offset(void)
 static int build_body(struct jit_ctx *ctx)
 {
 	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
-	const struct sk_filter *prog = ctx->skf;
+	const struct bpf_prog *prog = ctx->skf;
 	const struct sock_filter *inst;
 	unsigned int i, off, load_order, condt;
 	u32 k, b_off __maybe_unused;
@@ -1369,7 +1369,7 @@ jmp_cmp:
 
 int bpf_jit_enable __read_mostly;
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
 	struct jit_ctx ctx;
 	unsigned int alloc_size, tmp_idx;
@@ -1423,7 +1423,7 @@ out:
 	kfree(ctx.offsets);
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
 	if (fp->jited)
 		module_free(NULL, fp->bpf_func);
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 82e82cadcde5..3afa6f4c1957 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -25,7 +25,7 @@ static inline void bpf_flush_icache(void *start, void *end)
 	flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
-static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
+static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
 				   struct codegen_context *ctx)
 {
 	int i;
@@ -121,7 +121,7 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
 /* Assemble the body code between the prologue & epilogue. */
-static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
+static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			      struct codegen_context *ctx,
 			      unsigned int *addrs)
 {
@@ -569,7 +569,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
 	return 0;
 }
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
 	unsigned int proglen;
 	unsigned int alloclen;
@@ -693,7 +693,7 @@ out:
 	return;
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
 	if (fp->jited)
 		module_free(NULL, fp->bpf_func);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index a2cbd875543a..61e45b7c04d7 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -812,7 +812,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
 	return header;
 }
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
 	struct bpf_binary_header *header = NULL;
 	unsigned long size, prg_len, lit_len;
@@ -875,7 +875,7 @@ out:
 	kfree(addrs);
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
 	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
 	struct bpf_binary_header *header = (void *)addr;
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 892a102671ad..1f76c22a6a75 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -354,7 +354,7 @@ do { *prog++ = BR_OPC | WDISP22(OFF); \
  * emit_jump() calls with adjusted offsets.
  */
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
 	unsigned int cleanup_addr, proglen, oldproglen = 0;
 	u32 temp[8], *prog, *func, seen = 0, pass;
@@ -808,7 +808,7 @@ out:
 	return;
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
 	if (fp->jited)
 		module_free(NULL, fp->bpf_func);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 71737a83f022..5c8cb8043c5a 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -211,7 +211,7 @@ struct jit_context {
 	bool seen_ld_abs;
 };
 
-static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
+static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		  int oldproglen, struct jit_context *ctx)
 {
 	struct bpf_insn *insn = bpf_prog->insnsi;
@@ -235,7 +235,7 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
 	/* mov qword ptr [rbp-X],rbx */
 	EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
 
-	/* sk_convert_filter() maps classic BPF register X to R7 and uses R8
+	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
 	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
 	 * R8(r14). R9(r15) spill could be made conditional, but there is only
 	 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
@@ -841,7 +841,7 @@ common_load: ctx->seen_ld_abs = true;
 	/* By design x64 JIT should support all BPF instructions
 	 * This error will be seen if new instruction was added
 	 * to interpreter, but not to JIT
-	 * or if there is junk in sk_filter
+	 * or if there is junk in bpf_prog
 	 */
 	pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
 	return -EINVAL;
@@ -862,11 +862,11 @@ common_load: ctx->seen_ld_abs = true;
 	return proglen;
 }
 
-void bpf_jit_compile(struct sk_filter *prog)
+void bpf_jit_compile(struct bpf_prog *prog)
 {
 }
 
-void bpf_int_jit_compile(struct sk_filter *prog)
+void bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_binary_header *header = NULL;
 	int proglen, oldproglen = 0;
@@ -932,7 +932,7 @@ out:
 
 static void bpf_jit_free_deferred(struct work_struct *work)
 {
-	struct sk_filter *fp = container_of(work, struct sk_filter, work);
+	struct bpf_prog *fp = container_of(work, struct bpf_prog, work);
 	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
 	struct bpf_binary_header *header = (void *)addr;
 
@@ -941,7 +941,7 @@ static void bpf_jit_free_deferred(struct work_struct *work)
 	kfree(fp);
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
 	if (fp->jited) {
 		INIT_WORK(&fp->work, bpf_jit_free_deferred);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 62f0688d45a5..c4198fa490bf 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -379,12 +379,12 @@ isdn_ppp_release(int min, struct file *file)
 #endif
 #ifdef CONFIG_IPPP_FILTER
 	if (is->pass_filter) {
-		sk_unattached_filter_destroy(is->pass_filter);
+		bpf_prog_destroy(is->pass_filter);
 		is->pass_filter = NULL;
 	}
 
 	if (is->active_filter) {
-		sk_unattached_filter_destroy(is->active_filter);
+		bpf_prog_destroy(is->active_filter);
 		is->active_filter = NULL;
 	}
 #endif
@@ -639,12 +639,11 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
 		fprog.filter = code;
 
 		if (is->pass_filter) {
-			sk_unattached_filter_destroy(is->pass_filter);
+			bpf_prog_destroy(is->pass_filter);
 			is->pass_filter = NULL;
 		}
 		if (fprog.filter != NULL)
-			err = sk_unattached_filter_create(&is->pass_filter,
-							  &fprog);
+			err = bpf_prog_create(&is->pass_filter, &fprog);
 		else
 			err = 0;
 		kfree(code);
@@ -664,12 +663,11 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
 		fprog.filter = code;
 
 		if (is->active_filter) {
-			sk_unattached_filter_destroy(is->active_filter);
+			bpf_prog_destroy(is->active_filter);
 			is->active_filter = NULL;
 		}
 		if (fprog.filter != NULL)
-			err = sk_unattached_filter_create(&is->active_filter,
-							  &fprog);
+			err = bpf_prog_create(&is->active_filter, &fprog);
 		else
 			err = 0;
 		kfree(code);
@@ -1174,14 +1172,14 @@ isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *
 	}
 
 	if (is->pass_filter
-	    && SK_RUN_FILTER(is->pass_filter, skb) == 0) {
+	    && BPF_PROG_RUN(is->pass_filter, skb) == 0) {
 		if (is->debug & 0x2)
 			printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
 		kfree_skb(skb);
 		return;
 	}
 	if (!(is->active_filter
-	      && SK_RUN_FILTER(is->active_filter, skb) == 0)) {
+	      && BPF_PROG_RUN(is->active_filter, skb) == 0)) {
 		if (is->debug & 0x2)
 			printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
 		lp->huptimer = 0;
@@ -1320,14 +1318,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	if (ipt->pass_filter
-	    && SK_RUN_FILTER(ipt->pass_filter, skb) == 0) {
+	    && BPF_PROG_RUN(ipt->pass_filter, skb) == 0) {
 		if (ipt->debug & 0x4)
 			printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
 		kfree_skb(skb);
 		goto unlock;
 	}
 	if (!(ipt->active_filter
-	      && SK_RUN_FILTER(ipt->active_filter, skb) == 0)) {
+	      && BPF_PROG_RUN(ipt->active_filter, skb) == 0)) {
 		if (ipt->debug & 0x4)
 			printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
 		lp->huptimer = 0;
@@ -1517,9 +1515,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
 	}
 
 	drop |= is->pass_filter
-		&& SK_RUN_FILTER(is->pass_filter, skb) == 0;
+		&& BPF_PROG_RUN(is->pass_filter, skb) == 0;
 	drop |= is->active_filter
-		&& SK_RUN_FILTER(is->active_filter, skb) == 0;
+		&& BPF_PROG_RUN(is->active_filter, skb) == 0;
 
 	skb_push(skb, IPPP_MAX_HEADER - 4);
 	return drop;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 765248b42a0a..fa0d71727894 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -143,8 +143,8 @@ struct ppp {
 	struct sk_buff_head mrq;	/* MP: receive reconstruction queue */
 #endif /* CONFIG_PPP_MULTILINK */
 #ifdef CONFIG_PPP_FILTER
-	struct sk_filter *pass_filter;	/* filter for packets to pass */
-	struct sk_filter *active_filter;/* filter for pkts to reset idle */
+	struct bpf_prog *pass_filter;	/* filter for packets to pass */
+	struct bpf_prog *active_filter; /* filter for pkts to reset idle */
 #endif /* CONFIG_PPP_FILTER */
 	struct net	*ppp_net;	/* the net we belong to */
 	struct ppp_link_stats stats64;	/* 64 bit network stats */
@@ -762,12 +762,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 		ppp_lock(ppp);
 		if (ppp->pass_filter) {
-			sk_unattached_filter_destroy(ppp->pass_filter);
+			bpf_prog_destroy(ppp->pass_filter);
 			ppp->pass_filter = NULL;
 		}
 		if (fprog.filter != NULL)
-			err = sk_unattached_filter_create(&ppp->pass_filter,
-							  &fprog);
+			err = bpf_prog_create(&ppp->pass_filter,
+					      &fprog);
 		else
 			err = 0;
 		kfree(code);
@@ -788,12 +788,12 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 		ppp_lock(ppp);
 		if (ppp->active_filter) {
-			sk_unattached_filter_destroy(ppp->active_filter);
+			bpf_prog_destroy(ppp->active_filter);
 			ppp->active_filter = NULL;
 		}
 		if (fprog.filter != NULL)
-			err = sk_unattached_filter_create(&ppp->active_filter,
-							  &fprog);
+			err = bpf_prog_create(&ppp->active_filter,
+					      &fprog);
 		else
 			err = 0;
 		kfree(code);
@@ -1205,7 +1205,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 		   a four-byte PPP header on each packet */
 		*skb_push(skb, 2) = 1;
 		if (ppp->pass_filter &&
-		    SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
+		    BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
 			if (ppp->debug & 1)
 				netdev_printk(KERN_DEBUG, ppp->dev,
 					      "PPP: outbound frame "
@@ -1215,7 +1215,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
 		}
 		/* if this packet passes the active filter, record the time */
 		if (!(ppp->active_filter &&
-		      SK_RUN_FILTER(ppp->active_filter, skb) == 0))
+		      BPF_PROG_RUN(ppp->active_filter, skb) == 0))
 			ppp->last_xmit = jiffies;
 		skb_pull(skb, 2);
 #else
@@ -1839,7 +1839,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 
 		*skb_push(skb, 2) = 0;
 		if (ppp->pass_filter &&
-		    SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
+		    BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
 			if (ppp->debug & 1)
 				netdev_printk(KERN_DEBUG, ppp->dev,
 					      "PPP: inbound frame "
@@ -1848,7 +1848,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 			return;
 		}
 		if (!(ppp->active_filter &&
-		      SK_RUN_FILTER(ppp->active_filter, skb) == 0))
+		      BPF_PROG_RUN(ppp->active_filter, skb) == 0))
 			ppp->last_recv = jiffies;
 		__skb_pull(skb, 2);
 	} else
@@ -2829,12 +2829,12 @@ static void ppp_destroy_interface(struct ppp *ppp)
 #endif /* CONFIG_PPP_MULTILINK */
 #ifdef CONFIG_PPP_FILTER
 	if (ppp->pass_filter) {
-		sk_unattached_filter_destroy(ppp->pass_filter);
+		bpf_prog_destroy(ppp->pass_filter);
 		ppp->pass_filter = NULL;
 	}
 
 	if (ppp->active_filter) {
-		sk_unattached_filter_destroy(ppp->active_filter);
+		bpf_prog_destroy(ppp->active_filter);
 		ppp->active_filter = NULL;
 	}
 #endif /* CONFIG_PPP_FILTER */
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index d7be9b36bce6..a1536d0d83a9 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -58,7 +58,7 @@ struct lb_priv_ex {
 };
 
 struct lb_priv {
-	struct sk_filter __rcu *fp;
+	struct bpf_prog __rcu *fp;
 	lb_select_tx_port_func_t __rcu *select_tx_port_func;
 	struct lb_pcpu_stats __percpu *pcpu_stats;
 	struct lb_priv_ex *ex; /* priv extension */
@@ -174,14 +174,14 @@ static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name)
 static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
 				    struct sk_buff *skb)
 {
-	struct sk_filter *fp;
+	struct bpf_prog *fp;
 	uint32_t lhash;
 	unsigned char *c;
 
 	fp = rcu_dereference_bh(lb_priv->fp);
 	if (unlikely(!fp))
 		return 0;
-	lhash = SK_RUN_FILTER(fp, skb);
+	lhash = BPF_PROG_RUN(fp, skb);
 	c = (char *) &lhash;
 	return c[0] ^ c[1] ^ c[2] ^ c[3];
 }
@@ -271,8 +271,8 @@ static void __fprog_destroy(struct sock_fprog_kern *fprog)
 static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
 {
 	struct lb_priv *lb_priv = get_lb_priv(team);
-	struct sk_filter *fp = NULL;
-	struct sk_filter *orig_fp = NULL;
+	struct bpf_prog *fp = NULL;
+	struct bpf_prog *orig_fp = NULL;
 	struct sock_fprog_kern *fprog = NULL;
 	int err;
 
@@ -281,7 +281,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
 				   ctx->data.bin_val.ptr);
 	if (err)
 		return err;
-	err = sk_unattached_filter_create(&fp, fprog);
+	err = bpf_prog_create(&fp, fprog);
 	if (err) {
 		__fprog_destroy(fprog);
 		return err;
@@ -300,7 +300,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
 
 	if (orig_fp) {
 		synchronize_rcu();
-		sk_unattached_filter_destroy(orig_fp);
+		bpf_prog_destroy(orig_fp);
 	}
 	return 0;
 }
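The update side above retires the old program with the usual RCU publish-then-wait sequence; condensed from lb_bpf_func_set() (the pointer swap sits in the elided middle of the hunk and is assumed here to follow the team->lock-protected pattern):

	orig_fp = rcu_dereference_protected(lb_priv->fp,
					    lockdep_is_held(&team->lock));
	rcu_assign_pointer(lb_priv->fp, fp);	/* publish the new prog */

	if (orig_fp) {
		synchronize_rcu();		/* readers in lb_get_skb_hash() drain */
		bpf_prog_destroy(orig_fp);	/* nothing can run the old prog now */
	}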
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 20dd50ef7271..a5227ab8ccb1 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -296,7 +296,8 @@ enum {
 })
 
 /* Macro to invoke filter function. */
-#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi)
+#define SK_RUN_FILTER(filter, ctx) \
+	(*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
 
 struct bpf_insn {
 	__u8	code;		/* opcode */
@@ -323,12 +324,10 @@ struct sk_buff;
 struct sock;
 struct seccomp_data;
 
-struct sk_filter {
-	atomic_t		refcnt;
+struct bpf_prog {
 	u32			jited:1,	/* Is our filter JIT'ed? */
 				len:31;		/* Number of filter blocks */
 	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
-	struct rcu_head		rcu;
 	unsigned int		(*bpf_func)(const struct sk_buff *skb,
 					    const struct bpf_insn *filter);
 	union {
@@ -338,39 +337,45 @@ struct sk_filter {
 	};
 };
 
-static inline unsigned int sk_filter_size(unsigned int proglen)
+struct sk_filter {
+	atomic_t	refcnt;
+	struct rcu_head	rcu;
+	struct bpf_prog	*prog;
+};
+
+#define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
+
+static inline unsigned int bpf_prog_size(unsigned int proglen)
 {
-	return max(sizeof(struct sk_filter),
-		   offsetof(struct sk_filter, insns[proglen]));
+	return max(sizeof(struct bpf_prog),
+		   offsetof(struct bpf_prog, insns[proglen]));
 }
 
-#define sk_filter_proglen(fprog)			\
-	(fprog->len * sizeof(fprog->filter[0]))
+#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-void sk_filter_select_runtime(struct sk_filter *fp);
-void sk_filter_free(struct sk_filter *fp);
+void bpf_prog_select_runtime(struct bpf_prog *fp);
+void bpf_prog_free(struct bpf_prog *fp);
 
-int sk_convert_filter(struct sock_filter *prog, int len,
-		      struct bpf_insn *new_prog, int *new_len);
+int bpf_convert_filter(struct sock_filter *prog, int len,
+		       struct bpf_insn *new_prog, int *new_len);
 
-int sk_unattached_filter_create(struct sk_filter **pfp,
-				struct sock_fprog_kern *fprog);
-void sk_unattached_filter_destroy(struct sk_filter *fp);
+int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
+void bpf_prog_destroy(struct bpf_prog *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_detach_filter(struct sock *sk);
 
-int sk_chk_filter(const struct sock_filter *filter, unsigned int flen);
+int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
 		  unsigned int len);
 
-void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
+bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_int_jit_compile(struct sk_filter *fp);
+void bpf_int_jit_compile(struct bpf_prog *fp);
 
 #define BPF_ANC	BIT(15)
 
@@ -424,8 +429,8 @@ static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
 #include <linux/linkage.h>
 #include <linux/printk.h>
 
-void bpf_jit_compile(struct sk_filter *fp);
-void bpf_jit_free(struct sk_filter *fp);
+void bpf_jit_compile(struct bpf_prog *fp);
+void bpf_jit_free(struct bpf_prog *fp);
 
 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 				u32 pass, void *image)
@@ -439,11 +444,11 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 #else
 #include <linux/slab.h>
 
-static inline void bpf_jit_compile(struct sk_filter *fp)
+static inline void bpf_jit_compile(struct bpf_prog *fp)
 {
 }
 
-static inline void bpf_jit_free(struct sk_filter *fp)
+static inline void bpf_jit_free(struct bpf_prog *fp)
 {
 	kfree(fp);
 }
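A detail worth noting in the new header: the instruction arrays in struct bpf_prog share a union with a work_struct, which the deferred JIT free (see the x86 hunk above) reuses once the instructions are no longer needed. An allocation must therefore never be smaller than the struct itself, even for a one-instruction program, which is what the max() in bpf_prog_size() guarantees; typical use is simply:

	/* header plus 'len' insns, but never less than sizeof(struct
	 * bpf_prog), so the union's work_struct always fits
	 */
	fp = kzalloc(bpf_prog_size(len), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;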
diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h
index 8e10f57f109f..a0070c6dfaf8 100644
--- a/include/linux/isdn_ppp.h
+++ b/include/linux/isdn_ppp.h
@@ -180,8 +180,8 @@ struct ippp_struct {
 	struct slcompress *slcomp;
 #endif
 #ifdef CONFIG_IPPP_FILTER
-	struct sk_filter *pass_filter;	/* filter for packets to pass */
-	struct sk_filter *active_filter; /* filter for pkts to reset idle */
+	struct bpf_prog *pass_filter;	/* filter for packets to pass */
+	struct bpf_prog *active_filter;	/* filter for pkts to reset idle */
 #endif
 	unsigned long debug;
 	struct isdn_ppp_compressor *compressor,*decompressor;
diff --git a/include/uapi/linux/netfilter/xt_bpf.h b/include/uapi/linux/netfilter/xt_bpf.h
index 2ec9fbcd06f9..1fad2c27ac32 100644
--- a/include/uapi/linux/netfilter/xt_bpf.h
+++ b/include/uapi/linux/netfilter/xt_bpf.h
@@ -6,14 +6,14 @@
 
 #define XT_BPF_MAX_NUM_INSTR	64
 
-struct sk_filter;
+struct bpf_prog;
 
 struct xt_bpf_info {
 	__u16 bpf_program_num_elem;
 	struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR];
 
 	/* only used in the kernel */
-	struct sk_filter *filter __attribute__((aligned(8)));
+	struct bpf_prog *filter __attribute__((aligned(8)));
 };
 
 #endif /*_XT_BPF_H */
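The matching net/netfilter/xt_bpf.c change is not among the hunks shown here, but its per-packet path reduces to roughly the following after the rename (a sketch; the function and parameter names follow the usual xtables match signature):

	static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
	{
		const struct xt_bpf_info *info = par->matchinfo;

		return BPF_PROG_RUN(info->filter, skb);
	}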
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 265a02cc822d..7f0dbcbb34af 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -18,7 +18,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  * Andi Kleen - Fix a few bad bugs and races.
- * Kris Katterjohn - Added many additional checks in sk_chk_filter()
+ * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  */
 #include <linux/filter.h>
 #include <linux/skbuff.h>
@@ -73,15 +73,13 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 }
 
 /**
- *	__sk_run_filter - run a filter on a given context
- *	@ctx: buffer to run the filter on
- *	@insn: filter to apply
+ *	__bpf_prog_run - run eBPF program on a given context
+ *	@ctx: is the data we are operating on
+ *	@insn: is the array of eBPF instructions
  *
- * Decode and apply filter instructions to the skb->data. Return length to
- * keep, 0 for none. @ctx is the data we are operating on, @insn is the
- * array of filter instructions.
+ * Decode and execute eBPF instructions.
  */
-static unsigned int __sk_run_filter(void *ctx, const struct bpf_insn *insn)
+static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
 {
 	u64 stack[MAX_BPF_STACK / sizeof(u64)];
 	u64 regs[MAX_BPF_REG], tmp;
@@ -446,7 +444,7 @@ load_word:
 		/* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are
 		 * only appearing in the programs where ctx ==
 		 * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
-		 * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
+		 * == BPF_R6, bpf_convert_filter() saves it in BPF_R6,
 		 * internal BPF verifier will check that BPF_R6 ==
 		 * ctx.
 		 *
@@ -508,29 +506,29 @@ load_byte:
 	return 0;
 }
 
-void __weak bpf_int_jit_compile(struct sk_filter *prog)
+void __weak bpf_int_jit_compile(struct bpf_prog *prog)
 {
 }
 
 /**
- *	sk_filter_select_runtime - select execution runtime for BPF program
- *	@fp: sk_filter populated with internal BPF program
+ *	bpf_prog_select_runtime - select execution runtime for BPF program
+ *	@fp: bpf_prog populated with internal BPF program
  *
  * try to JIT internal BPF program, if JIT is not available select interpreter
- * BPF program will be executed via SK_RUN_FILTER() macro
+ * BPF program will be executed via BPF_PROG_RUN() macro
  */
-void sk_filter_select_runtime(struct sk_filter *fp)
+void bpf_prog_select_runtime(struct bpf_prog *fp)
 {
-	fp->bpf_func = (void *) __sk_run_filter;
+	fp->bpf_func = (void *) __bpf_prog_run;
 
 	/* Probe if internal BPF can be JITed */
 	bpf_int_jit_compile(fp);
 }
-EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
+EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
 
 /* free internal BPF program */
-void sk_filter_free(struct sk_filter *fp)
+void bpf_prog_free(struct bpf_prog *fp)
 {
 	bpf_jit_free(fp);
 }
-EXPORT_SYMBOL_GPL(sk_filter_free);
+EXPORT_SYMBOL_GPL(bpf_prog_free);
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 565743db5384..2f3fa2cc2eac 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -54,7 +54,7 @@
 struct seccomp_filter {
 	atomic_t usage;
 	struct seccomp_filter *prev;
-	struct sk_filter *prog;
+	struct bpf_prog *prog;
 };
 
 /* Limit any path through the tree to 256KB worth of instructions. */
@@ -87,7 +87,7 @@ static void populate_seccomp_data(struct seccomp_data *sd)
  * @filter: filter to verify
  * @flen: length of filter
  *
- * Takes a previously checked filter (by sk_chk_filter) and
+ * Takes a previously checked filter (by bpf_check_classic) and
  * redirects all filter code that loads struct sk_buff data
  * and related data through seccomp_bpf_load. It also
  * enforces length and alignment checking of those loads.
@@ -187,7 +187,7 @@ static u32 seccomp_run_filters(int syscall)
 	 * value always takes priority (ignoring the DATA).
 	 */
 	for (f = current->seccomp.filter; f; f = f->prev) {
-		u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);
+		u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)&sd);
 
 		if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
 			ret = cur_ret;
@@ -239,7 +239,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 		goto free_prog;
 
 	/* Check and rewrite the fprog via the skb checker */
-	ret = sk_chk_filter(fp, fprog->len);
+	ret = bpf_check_classic(fp, fprog->len);
 	if (ret)
 		goto free_prog;
 
@@ -249,7 +249,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 		goto free_prog;
 
 	/* Convert 'sock_filter' insns to 'bpf_insn' insns */
-	ret = sk_convert_filter(fp, fprog->len, NULL, &new_len);
+	ret = bpf_convert_filter(fp, fprog->len, NULL, &new_len);
 	if (ret)
 		goto free_prog;
 
@@ -260,12 +260,12 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
 	if (!filter)
 		goto free_prog;
 
-	filter->prog = kzalloc(sk_filter_size(new_len),
+	filter->prog = kzalloc(bpf_prog_size(new_len),
 			       GFP_KERNEL|__GFP_NOWARN);
 	if (!filter->prog)
 		goto free_filter;
 
-	ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
+	ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
 	if (ret)
 		goto free_filter_prog;
 	kfree(fp);
273 atomic_set(&filter->usage, 1); 273 atomic_set(&filter->usage, 1);
274 filter->prog->len = new_len; 274 filter->prog->len = new_len;
275 275
276 sk_filter_select_runtime(filter->prog); 276 bpf_prog_select_runtime(filter->prog);
277 277
278 /* 278 /*
279 * If there is an existing filter, make it the prev and don't drop its 279 * If there is an existing filter, make it the prev and don't drop its
@@ -337,7 +337,7 @@ void put_seccomp_filter(struct task_struct *tsk)
337 while (orig && atomic_dec_and_test(&orig->usage)) { 337 while (orig && atomic_dec_and_test(&orig->usage)) {
338 struct seccomp_filter *freeme = orig; 338 struct seccomp_filter *freeme = orig;
339 orig = orig->prev; 339 orig = orig->prev;
340 sk_filter_free(freeme->prog); 340 bpf_prog_free(freeme->prog);
341 kfree(freeme); 341 kfree(freeme);
342 } 342 }
343} 343}
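seccomp_attach_filter() above is the canonical two-pass user of bpf_convert_filter(), the workflow also documented in the net/core/filter.c comment further down. Stripped of its error paths, and using the variables of the function above, it reads:

	int new_len, ret;

	/* pass 1: NULL output buffer only computes the new length */
	ret = bpf_convert_filter(fp, fprog->len, NULL, &new_len);

	filter->prog = kzalloc(bpf_prog_size(new_len), GFP_KERNEL | __GFP_NOWARN);

	/* pass 2: emit the remapped eBPF instructions */
	ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);

	filter->prog->len = new_len;
	bpf_prog_select_runtime(filter->prog);	/* JIT if available, else interpreter */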
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 5f48623ee1a7..89e0345733bd 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1761,9 +1761,9 @@ static int probe_filter_length(struct sock_filter *fp)
 	return len + 1;
 }
 
-static struct sk_filter *generate_filter(int which, int *err)
+static struct bpf_prog *generate_filter(int which, int *err)
 {
-	struct sk_filter *fp;
+	struct bpf_prog *fp;
 	struct sock_fprog_kern fprog;
 	unsigned int flen = probe_filter_length(tests[which].u.insns);
 	__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
@@ -1773,7 +1773,7 @@ static struct sk_filter *generate_filter(int which, int *err)
 		fprog.filter = tests[which].u.insns;
 		fprog.len = flen;
 
-		*err = sk_unattached_filter_create(&fp, &fprog);
+		*err = bpf_prog_create(&fp, &fprog);
 		if (tests[which].aux & FLAG_EXPECTED_FAIL) {
 			if (*err == -EINVAL) {
 				pr_cont("PASS\n");
@@ -1798,7 +1798,7 @@ static struct sk_filter *generate_filter(int which, int *err)
 		break;
 
 	case INTERNAL:
-		fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
+		fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
 		if (fp == NULL) {
 			pr_cont("UNEXPECTED_FAIL no memory left\n");
 			*err = -ENOMEM;
@@ -1809,7 +1809,7 @@ static struct sk_filter *generate_filter(int which, int *err)
 		memcpy(fp->insnsi, tests[which].u.insns_int,
 		       fp->len * sizeof(struct bpf_insn));
 
-		sk_filter_select_runtime(fp);
+		bpf_prog_select_runtime(fp);
 		break;
 	}
 
@@ -1817,21 +1817,21 @@ static struct sk_filter *generate_filter(int which, int *err)
 	return fp;
 }
 
-static void release_filter(struct sk_filter *fp, int which)
+static void release_filter(struct bpf_prog *fp, int which)
 {
 	__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
 
 	switch (test_type) {
 	case CLASSIC:
-		sk_unattached_filter_destroy(fp);
+		bpf_prog_destroy(fp);
 		break;
 	case INTERNAL:
-		sk_filter_free(fp);
+		bpf_prog_free(fp);
 		break;
 	}
 }
 
-static int __run_one(const struct sk_filter *fp, const void *data,
+static int __run_one(const struct bpf_prog *fp, const void *data,
 		     int runs, u64 *duration)
 {
 	u64 start, finish;
@@ -1840,7 +1840,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
 	start = ktime_to_us(ktime_get());
 
 	for (i = 0; i < runs; i++)
-		ret = SK_RUN_FILTER(fp, data);
+		ret = BPF_PROG_RUN(fp, data);
 
 	finish = ktime_to_us(ktime_get());
 
@@ -1850,7 +1850,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
 	return ret;
 }
 
-static int run_one(const struct sk_filter *fp, struct bpf_test *test)
+static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
 {
 	int err_cnt = 0, i, runs = MAX_TESTRUNS;
 
@@ -1884,7 +1884,7 @@ static __init int test_bpf(void)
 	int i, err_cnt = 0, pass_cnt = 0;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
-		struct sk_filter *fp;
+		struct bpf_prog *fp;
 		int err;
 
 		pr_info("#%d %s ", i, tests[i].descr);
diff --git a/net/core/filter.c b/net/core/filter.c
index 42c1944b0c63..d814b8a89d0f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -18,7 +18,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  * Andi Kleen - Fix a few bad bugs and races.
- * Kris Katterjohn - Added many additional checks in sk_chk_filter()
+ * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  */
 
 #include <linux/module.h>
@@ -312,7 +312,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 }
 
 /**
- *	sk_convert_filter - convert filter program
+ *	bpf_convert_filter - convert filter program
  *	@prog: the user passed filter program
  *	@len: the length of the user passed filter program
  *	@new_prog: buffer where converted program will be stored
@@ -322,12 +322,12 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
  * Conversion workflow:
  *
  * 1) First pass for calculating the new program length:
- *   sk_convert_filter(old_prog, old_len, NULL, &new_len)
+ *   bpf_convert_filter(old_prog, old_len, NULL, &new_len)
  *
  * 2) 2nd pass to remap in two passes: 1st pass finds new
  *    jump offsets, 2nd pass remapping:
  *   new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
- *   sk_convert_filter(old_prog, old_len, new_prog, &new_len);
+ *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
  *
  * User BPF's register A is mapped to our BPF register 6, user BPF
  * register X is mapped to BPF register 7; frame pointer is always
@@ -335,8 +335,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
  * for socket filters: ctx == 'struct sk_buff *', for seccomp:
  * ctx == 'struct seccomp_data *'.
  */
-int sk_convert_filter(struct sock_filter *prog, int len,
-		      struct bpf_insn *new_prog, int *new_len)
+int bpf_convert_filter(struct sock_filter *prog, int len,
+		       struct bpf_insn *new_prog, int *new_len)
 {
 	int new_flen = 0, pass = 0, target, i;
 	struct bpf_insn *new_insn;
@@ -721,7 +721,7 @@ static bool chk_code_allowed(u16 code_to_probe)
 }
 
 /**
- *	sk_chk_filter - verify socket filter code
+ *	bpf_check_classic - verify socket filter code
  *	@filter: filter to verify
  *	@flen: length of filter
  *
@@ -734,7 +734,7 @@ static bool chk_code_allowed(u16 code_to_probe)
  *
  * Returns 0 if the rule set is legal or -EINVAL if not.
  */
-int sk_chk_filter(const struct sock_filter *filter, unsigned int flen)
+int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
 {
 	bool anc_found;
 	int pc;
@@ -808,12 +808,12 @@ int sk_chk_filter(const struct sock_filter *filter, unsigned int flen)
 
 	return -EINVAL;
 }
-EXPORT_SYMBOL(sk_chk_filter);
+EXPORT_SYMBOL(bpf_check_classic);
 
-static int sk_store_orig_filter(struct sk_filter *fp,
-				const struct sock_fprog *fprog)
+static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
+				      const struct sock_fprog *fprog)
 {
-	unsigned int fsize = sk_filter_proglen(fprog);
+	unsigned int fsize = bpf_classic_proglen(fprog);
 	struct sock_fprog_kern *fkprog;
 
 	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
@@ -831,7 +831,7 @@ static int sk_store_orig_filter(struct sk_filter *fp,
 	return 0;
 }
 
-static void sk_release_orig_filter(struct sk_filter *fp)
+static void bpf_release_orig_filter(struct bpf_prog *fp)
 {
 	struct sock_fprog_kern *fprog = fp->orig_prog;
 
@@ -841,10 +841,16 @@ static void sk_release_orig_filter(struct sk_filter *fp)
 	}
 }
 
+static void __bpf_prog_release(struct bpf_prog *prog)
+{
+	bpf_release_orig_filter(prog);
+	bpf_prog_free(prog);
+}
+
 static void __sk_filter_release(struct sk_filter *fp)
 {
-	sk_release_orig_filter(fp);
-	sk_filter_free(fp);
+	__bpf_prog_release(fp->prog);
+	kfree(fp);
 }
 
 /**
@@ -872,44 +878,33 @@ static void sk_filter_release(struct sk_filter *fp)
 
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 {
-	atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
-	sk_filter_release(fp);
-}
+	u32 filter_size = bpf_prog_size(fp->prog->len);
 
-void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
-{
-	atomic_inc(&fp->refcnt);
-	atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
+	atomic_sub(filter_size, &sk->sk_omem_alloc);
+	sk_filter_release(fp);
 }
 
-static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
-					      struct sock *sk,
-					      unsigned int len)
+/* try to charge the socket memory if there is space available
+ * return true on success
+ */
+bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
-	struct sk_filter *fp_new;
-
-	if (sk == NULL)
-		return krealloc(fp, len, GFP_KERNEL);
-
-	fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
-	if (fp_new) {
-		*fp_new = *fp;
-		/* As we're keeping orig_prog in fp_new along,
-		 * we need to make sure we're not evicting it
-		 * from the old fp.
-		 */
-		fp->orig_prog = NULL;
-		sk_filter_uncharge(sk, fp);
+	u32 filter_size = bpf_prog_size(fp->prog->len);
+
+	/* same check as in sock_kmalloc() */
+	if (filter_size <= sysctl_optmem_max &&
+	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
+		atomic_inc(&fp->refcnt);
+		atomic_add(filter_size, &sk->sk_omem_alloc);
+		return true;
 	}
-
-	return fp_new;
+	return false;
 }
 
-static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
-					     struct sock *sk)
+static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
 {
 	struct sock_filter *old_prog;
-	struct sk_filter *old_fp;
+	struct bpf_prog *old_fp;
 	int err, new_len, old_len = fp->len;
 
 	/* We are free to overwrite insns et al right here as it
@@ -932,13 +927,13 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
 	}
 
 	/* 1st pass: calculate the new program length. */
-	err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
+	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
 	if (err)
 		goto out_err_free;
 
 	/* Expand fp for appending the new filter representation. */
 	old_fp = fp;
-	fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
+	fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL);
 	if (!fp) {
 		/* The old_fp is still around in case we couldn't
 		 * allocate new memory, so uncharge on that one.
@@ -951,16 +946,16 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
 	fp->len = new_len;
 
 	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
-	err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
+	err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
 	if (err)
-		/* 2nd sk_convert_filter() can fail only if it fails
+		/* 2nd bpf_convert_filter() can fail only if it fails
 		 * to allocate memory, remapping must succeed. Note,
 		 * that at this time old_fp has already been released
-		 * by __sk_migrate_realloc().
+		 * by krealloc().
 		 */
 		goto out_err_free;
 
-	sk_filter_select_runtime(fp);
+	bpf_prog_select_runtime(fp);
 
 	kfree(old_prog);
 	return fp;
@@ -968,28 +963,20 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
 out_err_free:
 	kfree(old_prog);
 out_err:
-	/* Rollback filter setup. */
-	if (sk != NULL)
-		sk_filter_uncharge(sk, fp);
-	else
-		kfree(fp);
+	__bpf_prog_release(fp);
 	return ERR_PTR(err);
 }
 
-static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
-					     struct sock *sk)
+static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
 {
 	int err;
 
 	fp->bpf_func = NULL;
 	fp->jited = 0;
 
-	err = sk_chk_filter(fp->insns, fp->len);
+	err = bpf_check_classic(fp->insns, fp->len);
 	if (err) {
-		if (sk != NULL)
-			sk_filter_uncharge(sk, fp);
-		else
-			kfree(fp);
+		__bpf_prog_release(fp);
 		return ERR_PTR(err);
 	}
 
@@ -1002,13 +989,13 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 	 * internal BPF translation for the optimized interpreter.
 	 */
 	if (!fp->jited)
-		fp = __sk_migrate_filter(fp, sk);
+		fp = bpf_migrate_filter(fp);
 
 	return fp;
 }
 
 /**
- *	sk_unattached_filter_create - create an unattached filter
+ *	bpf_prog_create - create an unattached filter
  *	@pfp: the unattached filter that is created
  *	@fprog: the filter program
  *
@@ -1017,23 +1004,21 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
  *	If an error occurs or there is insufficient memory for the filter
  *	a negative errno code is returned. On success the return is zero.
  */
-int sk_unattached_filter_create(struct sk_filter **pfp,
-				struct sock_fprog_kern *fprog)
+int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
 {
-	unsigned int fsize = sk_filter_proglen(fprog);
-	struct sk_filter *fp;
+	unsigned int fsize = bpf_classic_proglen(fprog);
+	struct bpf_prog *fp;
 
 	/* Make sure new filter is there and in the right amounts. */
 	if (fprog->filter == NULL)
 		return -EINVAL;
 
-	fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
+	fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL);
 	if (!fp)
 		return -ENOMEM;
 
 	memcpy(fp->insns, fprog->filter, fsize);
 
-	atomic_set(&fp->refcnt, 1);
 	fp->len = fprog->len;
 	/* Since unattached filters are not copied back to user
 	 * space through sk_get_filter(), we do not need to hold
@@ -1041,23 +1026,23 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
 	 */
 	fp->orig_prog = NULL;
 
-	/* __sk_prepare_filter() already takes care of uncharging
+	/* bpf_prepare_filter() already takes care of freeing
 	 * memory in case something goes wrong.
 	 */
-	fp = __sk_prepare_filter(fp, NULL);
+	fp = bpf_prepare_filter(fp);
 	if (IS_ERR(fp))
 		return PTR_ERR(fp);
 
 	*pfp = fp;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
+EXPORT_SYMBOL_GPL(bpf_prog_create);
 
-void sk_unattached_filter_destroy(struct sk_filter *fp)
+void bpf_prog_destroy(struct bpf_prog *fp)
 {
-	__sk_filter_release(fp);
+	__bpf_prog_release(fp);
 }
-EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
+EXPORT_SYMBOL_GPL(bpf_prog_destroy);
 
 /**
  *	sk_attach_filter - attach a socket filter
@@ -1072,8 +1057,9 @@ EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
 	struct sk_filter *fp, *old_fp;
-	unsigned int fsize = sk_filter_proglen(fprog);
-	unsigned int sk_fsize = sk_filter_size(fprog->len);
+	unsigned int fsize = bpf_classic_proglen(fprog);
+	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
+	struct bpf_prog *prog;
 	int err;
 
 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -1083,30 +1069,43 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	if (fprog->filter == NULL)
 		return -EINVAL;
 
-	fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
-	if (!fp)
+	prog = kmalloc(bpf_fsize, GFP_KERNEL);
+	if (!prog)
 		return -ENOMEM;
 
-	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
-		sock_kfree_s(sk, fp, sk_fsize);
+	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
+		kfree(prog);
 		return -EFAULT;
 	}
 
-	atomic_set(&fp->refcnt, 1);
-	fp->len = fprog->len;
+	prog->len = fprog->len;
 
-	err = sk_store_orig_filter(fp, fprog);
+	err = bpf_prog_store_orig_filter(prog, fprog);
 	if (err) {
-		sk_filter_uncharge(sk, fp);
+		kfree(prog);
 		return -ENOMEM;
 	}
 
-	/* __sk_prepare_filter() already takes care of uncharging
+	/* bpf_prepare_filter() already takes care of freeing
 	 * memory in case something goes wrong.
 	 */
-	fp = __sk_prepare_filter(fp, sk);
-	if (IS_ERR(fp))
-		return PTR_ERR(fp);
+	prog = bpf_prepare_filter(prog);
+	if (IS_ERR(prog))
+		return PTR_ERR(prog);
+
+	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+	if (!fp) {
+		__bpf_prog_release(prog);
+		return -ENOMEM;
+	}
+	fp->prog = prog;
+
+	atomic_set(&fp->refcnt, 0);
+
+	if (!sk_filter_charge(sk, fp)) {
+		__sk_filter_release(fp);
+		return -ENOMEM;
+	}
 
 	old_fp = rcu_dereference_protected(sk->sk_filter,
 					   sock_owned_by_user(sk));
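Editorial note: nothing changes for userspace here; sk_attach_filter() is still reached through setsockopt(SO_ATTACH_FILTER), and only the kernel-internal split into a struct bpf_prog plus a reference-counted, charge-accounted struct sk_filter wrapper is new. A minimal, hypothetical caller (accept-all program, not part of this patch):

#include <linux/filter.h>
#include <sys/socket.h>

static int attach_accept_all(int fd)
{
	struct sock_filter insns[] = {
		/* BPF_RET | BPF_K: accept the whole packet */
		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
	};
	struct sock_fprog fprog = {
		.len	= sizeof(insns) / sizeof(insns[0]),
		.filter	= insns,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &fprog, sizeof(fprog));
}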
@@ -1155,7 +1154,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
 	/* We're copying the filter that has been originally attached,
 	 * so no conversion/decode needed anymore.
 	 */
-	fprog = filter->orig_prog;
+	fprog = filter->prog->orig_prog;
 
 	ret = fprog->len;
 	if (!len)
@@ -1167,7 +1166,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
 		goto out;
 
 	ret = -EFAULT;
-	if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
+	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
 		goto out;
 
 	/* Instead of bytes, the API requests to return the number
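Editorial note: sk_get_filter() keeps returning the classic program exactly as it was attached; only the lookup moved one level down, to filter->prog->orig_prog. A hypothetical userspace round-trip, assuming the usual two-call getsockopt(SO_GET_FILTER) pattern, where the length is counted in filter blocks rather than bytes as the hunk above notes:

#include <linux/filter.h>
#include <sys/socket.h>

static int dump_filter(int fd, struct sock_filter *buf, socklen_t avail)
{
	socklen_t len = 0;

	/* With len == 0, sk_get_filter() only reports fprog->len. */
	if (getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len) < 0)
		return -1;
	if (len > avail)
		return -1;

	/* Second call copies filter->prog->orig_prog back verbatim. */
	return getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, buf, &len);
}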
diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
index 12ab7b4be609..4eab4a94a59d 100644
--- a/net/core/ptp_classifier.c
+++ b/net/core/ptp_classifier.c
@@ -107,11 +107,11 @@
 #include <linux/filter.h>
 #include <linux/ptp_classify.h>
 
-static struct sk_filter *ptp_insns __read_mostly;
+static struct bpf_prog *ptp_insns __read_mostly;
 
 unsigned int ptp_classify_raw(const struct sk_buff *skb)
 {
-	return SK_RUN_FILTER(ptp_insns, skb);
+	return BPF_PROG_RUN(ptp_insns, skb);
 }
 EXPORT_SYMBOL_GPL(ptp_classify_raw);
 
@@ -189,5 +189,5 @@ void __init ptp_classifier_init(void)
 		.len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
 	};
 
-	BUG_ON(sk_unattached_filter_create(&ptp_insns, &ptp_prog));
+	BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
 }
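Editorial note: ptp_classifier_init() is the smallest template for the renamed unattached API: a static classic program handed to bpf_prog_create() once, then run via BPF_PROG_RUN() forever after. A minimal, hypothetical in-kernel user of the same pattern, including teardown (the accept-all program and all example_* names are illustrative, not part of this patch):

#include <linux/filter.h>

static struct sock_filter example_insns[] = {
	BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept whole packet */
};

static struct bpf_prog *example_prog;

static int __init example_init(void)
{
	struct sock_fprog_kern fprog = {
		.len	= ARRAY_SIZE(example_insns),
		.filter	= example_insns,
	};

	/* Validates via bpf_check_classic(), then JITs the program or
	 * converts it with bpf_migrate_filter(); this was
	 * sk_unattached_filter_create() before this series.
	 */
	return bpf_prog_create(&example_prog, &fprog);
}

static unsigned int example_run(const struct sk_buff *skb)
{
	return BPF_PROG_RUN(example_prog, skb);	/* was SK_RUN_FILTER() */
}

static void __exit example_exit(void)
{
	bpf_prog_destroy(example_prog);	/* was sk_unattached_filter_destroy() */
}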
diff --git a/net/core/sock.c b/net/core/sock.c
index 134291d73fcd..a741163568fa 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1474,6 +1474,7 @@ static void sk_update_clone(const struct sock *sk, struct sock *newsk)
 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 {
 	struct sock *newsk;
+	bool is_charged = true;
 
 	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
 	if (newsk != NULL) {
@@ -1518,9 +1519,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 
 		filter = rcu_dereference_protected(newsk->sk_filter, 1);
 		if (filter != NULL)
-			sk_filter_charge(newsk, filter);
+			/* though it's an empty new sock, the charging may fail
+			 * if sysctl_optmem_max was changed between creation of
+			 * original socket and cloning
+			 */
+			is_charged = sk_filter_charge(newsk, filter);
 
-		if (unlikely(xfrm_sk_clone_policy(newsk))) {
+		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
 			/* It is still raw copy of parent, so invalidate
 			 * destructor and make plain sk_free() */
 			newsk->sk_destruct = NULL;
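Editorial note: to make the new failure path concrete, a worked example with hypothetical numbers: suppose the parent attached a program with bpf_prog_size() == 6144 while sysctl_optmem_max was 8192, and an administrator has since lowered the limit to 4096. At clone time sk_filter_charge() evaluates, per the hunk in net/core/filter.c above:

	/* 6144 <= 4096 is false, so the charge is refused */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max)

It therefore returns false, and the !is_charged test routes the half-built socket through the existing xfrm failure path (sk_destruct cleared, plain sk_free()) instead of leaving an unaccounted filter attached.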
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index a4216a4c9572..ad704c757bb4 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -68,8 +68,8 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
 	if (!filter)
 		goto out;
 
-	fprog = filter->orig_prog;
-	flen = sk_filter_proglen(fprog);
+	fprog = filter->prog->orig_prog;
+	flen = bpf_classic_proglen(fprog);
 
 	attr = nla_reserve(skb, attrtype, flen);
 	if (attr == NULL) {
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index bbffdbdaf603..dffee9d47ec4 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -28,7 +28,7 @@ static int bpf_mt_check(const struct xt_mtchk_param *par)
 	program.len = info->bpf_program_num_elem;
 	program.filter = info->bpf_program;
 
-	if (sk_unattached_filter_create(&info->filter, &program)) {
+	if (bpf_prog_create(&info->filter, &program)) {
 		pr_info("bpf: check failed: parse error\n");
 		return -EINVAL;
 	}
@@ -40,13 +40,13 @@ static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_bpf_info *info = par->matchinfo;
 
-	return SK_RUN_FILTER(info->filter, skb);
+	return BPF_PROG_RUN(info->filter, skb);
 }
 
 static void bpf_mt_destroy(const struct xt_mtdtor_param *par)
 {
 	const struct xt_bpf_info *info = par->matchinfo;
-	sk_unattached_filter_destroy(info->filter);
+	bpf_prog_destroy(info->filter);
 }
 
 static struct xt_match bpf_mt_reg __read_mostly = {
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 13f64df2c710..0e30d58149da 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -30,7 +30,7 @@ struct cls_bpf_head {
 };
 
 struct cls_bpf_prog {
-	struct sk_filter *filter;
+	struct bpf_prog *filter;
 	struct sock_filter *bpf_ops;
 	struct tcf_exts exts;
 	struct tcf_result res;
@@ -54,7 +54,7 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 	int ret;
 
 	list_for_each_entry(prog, &head->plist, link) {
-		int filter_res = SK_RUN_FILTER(prog->filter, skb);
+		int filter_res = BPF_PROG_RUN(prog->filter, skb);
 
 		if (filter_res == 0)
 			continue;
@@ -92,7 +92,7 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
 	tcf_unbind_filter(tp, &prog->res);
 	tcf_exts_destroy(tp, &prog->exts);
 
-	sk_unattached_filter_destroy(prog->filter);
+	bpf_prog_destroy(prog->filter);
 
 	kfree(prog->bpf_ops);
 	kfree(prog);
@@ -161,7 +161,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	struct sock_filter *bpf_ops, *bpf_old;
 	struct tcf_exts exts;
 	struct sock_fprog_kern tmp;
-	struct sk_filter *fp, *fp_old;
+	struct bpf_prog *fp, *fp_old;
 	u16 bpf_size, bpf_len;
 	u32 classid;
 	int ret;
@@ -193,7 +193,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	tmp.len = bpf_len;
 	tmp.filter = bpf_ops;
 
-	ret = sk_unattached_filter_create(&fp, &tmp);
+	ret = bpf_prog_create(&fp, &tmp);
 	if (ret)
 		goto errout_free;
 
@@ -211,7 +211,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	tcf_exts_change(tp, &prog->exts, &exts);
 
 	if (fp_old)
-		bpf_prog_destroy(fp_old);
+		bpf_prog_destroy(fp_old);
 	if (bpf_old)
 		kfree(bpf_old);
 
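Editorial note: the cls_bpf hunks follow a build-first, destroy-last discipline so that concurrent cls_bpf_classify() calls never run a destroyed program. A condensed sketch of that ordering (hypothetical, with locking and the netlink plumbing of cls_bpf_modify_existing() elided):

	/* 1. Build the replacement before touching published state. */
	ret = bpf_prog_create(&fp, &tmp);
	if (ret)
		goto errout_free;	/* old program still intact */

	/* 2. Publish the new program, remembering the old one. */
	fp_old = prog->filter;
	prog->filter = fp;

	/* 3. Only now is the displaced program safe to destroy. */
	if (fp_old)
		bpf_prog_destroy(fp_old);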