author     David S. Miller <davem@davemloft.net>  2014-08-02 18:04:10 -0400
committer  David S. Miller <davem@davemloft.net>  2014-08-02 18:04:10 -0400
commit     e339756c9995648eecd015391f66baf2fd251fec (patch)
tree       dcb1aba57530e6c9426a81758173ca146ffafcaf /net
parent     4330487acfff0cf1d7b14d238583a182e0a444bb (diff)
parent     7ae457c1e5b45a1b826fad9d62b32191d2bdcfdb (diff)
Merge branch 'filter-next'
Alexei Starovoitov says:

====================
net: filter: split sk_filter into socket and bpf, cleanup names

The main goal of the series is to split 'struct sk_filter' into socket
and bpf parts and cleanup names in the following way:

- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix

split 'struct sk_filter' into

	struct sk_filter {
		atomic_t	refcnt;
		struct rcu_head	rcu;
		struct bpf_prog	*prog;
	};

and

	struct bpf_prog {
		u32			jited:1,
					len:31;
		struct sock_fprog_kern	*orig_prog;
		unsigned int		(*bpf_func)(const struct sk_buff *skb,
						    const struct bpf_insn *filter);
		union {
			struct sock_filter	insns[0];
			struct bpf_insn		insnsi[0];
			struct work_struct	work;
		};
	};

so that 'struct bpf_prog' can be used independent of sockets and cleans up
'unattached' bpf use cases: isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf,
test_bpf, which don't need refcnt/rcu fields.

It's a follow up to the rcu cleanup started by Pablo in commit 34c5bd66e5
("net: filter: don't release unattached filter through call_rcu()").

Patch 1 - cleans up socket memory charging and makes it possible for
functions sk(bpf)_migrate_filter(), sk(bpf)_prepare_filter() to be
socket independent
Patches 2-4 - trivial renames
Patch 5 - sk_filter split and renames of related sk_*() functions
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
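For orientation before the diff: after this series an 'unattached' user deals
only with struct bpf_prog and never touches refcnt/rcu. The following minimal
sketch is not part of the series; the example_* names and the one-instruction
accept-all classic program are illustrative only, while the
bpf_prog_create()/BPF_PROG_RUN()/bpf_prog_destroy() calls mirror what
ptp_classifier, xt_bpf and cls_bpf do in the hunks below.

/* Hypothetical unattached filter user under the post-series API. */
#include <linux/filter.h>

static struct bpf_prog *example_prog;

static int example_setup(void)
{
	static struct sock_filter example_insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept packet */
	};
	struct sock_fprog_kern example_fprog = {
		.len	= ARRAY_SIZE(example_insns),
		.filter	= example_insns,
	};

	/* was sk_unattached_filter_create(); verifies the classic
	 * program, then JITs it or converts it to internal BPF
	 */
	return bpf_prog_create(&example_prog, &example_fprog);
}

static unsigned int example_classify(const struct sk_buff *skb)
{
	return BPF_PROG_RUN(example_prog, skb);	/* was SK_RUN_FILTER() */
}

static void example_teardown(void)
{
	bpf_prog_destroy(example_prog);	/* was sk_unattached_filter_destroy() */
}

The sk_filter wrapper (refcnt + rcu + prog) survives only on the socket attach
path, where sk_filter_charge()/sk_filter_uncharge() account the bpf_prog size
against sk_omem_alloc.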
Diffstat (limited to 'net')
-rw-r--r--  net/core/filter.c          183
-rw-r--r--  net/core/ptp_classifier.c    6
-rw-r--r--  net/core/sock.c              9
-rw-r--r--  net/core/sock_diag.c         4
-rw-r--r--  net/netfilter/xt_bpf.c       6
-rw-r--r--  net/sched/cls_bpf.c         12
6 files changed, 112 insertions, 108 deletions
diff --git a/net/core/filter.c b/net/core/filter.c
index 42c1944b0c63..d814b8a89d0f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -18,7 +18,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  * Andi Kleen - Fix a few bad bugs and races.
- * Kris Katterjohn - Added many additional checks in sk_chk_filter()
+ * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  */
 
 #include <linux/module.h>
@@ -312,7 +312,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 }
 
 /**
- * sk_convert_filter - convert filter program
+ * bpf_convert_filter - convert filter program
  * @prog: the user passed filter program
  * @len: the length of the user passed filter program
  * @new_prog: buffer where converted program will be stored
@@ -322,12 +322,12 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
  * Conversion workflow:
  *
  * 1) First pass for calculating the new program length:
- *   sk_convert_filter(old_prog, old_len, NULL, &new_len)
+ *   bpf_convert_filter(old_prog, old_len, NULL, &new_len)
  *
  * 2) 2nd pass to remap in two passes: 1st pass finds new
  *    jump offsets, 2nd pass remapping:
  *   new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
- *   sk_convert_filter(old_prog, old_len, new_prog, &new_len);
+ *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
  *
  * User BPF's register A is mapped to our BPF register 6, user BPF
  * register X is mapped to BPF register 7; frame pointer is always
@@ -335,8 +335,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
  * for socket filters: ctx == 'struct sk_buff *', for seccomp:
  * ctx == 'struct seccomp_data *'.
  */
-int sk_convert_filter(struct sock_filter *prog, int len,
-		      struct bpf_insn *new_prog, int *new_len)
+int bpf_convert_filter(struct sock_filter *prog, int len,
+		       struct bpf_insn *new_prog, int *new_len)
 {
 	int new_flen = 0, pass = 0, target, i;
 	struct bpf_insn *new_insn;
@@ -721,7 +721,7 @@ static bool chk_code_allowed(u16 code_to_probe)
 }
 
 /**
- * sk_chk_filter - verify socket filter code
+ * bpf_check_classic - verify socket filter code
  * @filter: filter to verify
  * @flen: length of filter
  *
@@ -734,7 +734,7 @@ static bool chk_code_allowed(u16 code_to_probe)
  *
  * Returns 0 if the rule set is legal or -EINVAL if not.
  */
-int sk_chk_filter(const struct sock_filter *filter, unsigned int flen)
+int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
 {
 	bool anc_found;
 	int pc;
@@ -808,12 +808,12 @@ int sk_chk_filter(const struct sock_filter *filter, unsigned int flen)
 
 	return -EINVAL;
 }
-EXPORT_SYMBOL(sk_chk_filter);
+EXPORT_SYMBOL(bpf_check_classic);
 
-static int sk_store_orig_filter(struct sk_filter *fp,
-				const struct sock_fprog *fprog)
+static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
+				      const struct sock_fprog *fprog)
 {
-	unsigned int fsize = sk_filter_proglen(fprog);
+	unsigned int fsize = bpf_classic_proglen(fprog);
 	struct sock_fprog_kern *fkprog;
 
 	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
@@ -831,7 +831,7 @@ static int sk_store_orig_filter(struct sk_filter *fp,
 	return 0;
 }
 
-static void sk_release_orig_filter(struct sk_filter *fp)
+static void bpf_release_orig_filter(struct bpf_prog *fp)
 {
 	struct sock_fprog_kern *fprog = fp->orig_prog;
 
@@ -841,10 +841,16 @@ static void sk_release_orig_filter(struct sk_filter *fp)
 	}
 }
 
+static void __bpf_prog_release(struct bpf_prog *prog)
+{
+	bpf_release_orig_filter(prog);
+	bpf_prog_free(prog);
+}
+
 static void __sk_filter_release(struct sk_filter *fp)
 {
-	sk_release_orig_filter(fp);
-	sk_filter_free(fp);
+	__bpf_prog_release(fp->prog);
+	kfree(fp);
 }
 
 /**
@@ -872,44 +878,33 @@ static void sk_filter_release(struct sk_filter *fp)
 
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 {
-	atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
-	sk_filter_release(fp);
-}
+	u32 filter_size = bpf_prog_size(fp->prog->len);
 
-void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
-{
-	atomic_inc(&fp->refcnt);
-	atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
+	atomic_sub(filter_size, &sk->sk_omem_alloc);
+	sk_filter_release(fp);
 }
 
-static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
-					      struct sock *sk,
-					      unsigned int len)
+/* try to charge the socket memory if there is space available
+ * return true on success
+ */
+bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
-	struct sk_filter *fp_new;
+	u32 filter_size = bpf_prog_size(fp->prog->len);
 
-	if (sk == NULL)
-		return krealloc(fp, len, GFP_KERNEL);
-
-	fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
-	if (fp_new) {
-		*fp_new = *fp;
-		/* As we're keeping orig_prog in fp_new along,
-		 * we need to make sure we're not evicting it
-		 * from the old fp.
-		 */
-		fp->orig_prog = NULL;
-		sk_filter_uncharge(sk, fp);
+	/* same check as in sock_kmalloc() */
+	if (filter_size <= sysctl_optmem_max &&
+	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
+		atomic_inc(&fp->refcnt);
+		atomic_add(filter_size, &sk->sk_omem_alloc);
+		return true;
 	}
-
-	return fp_new;
+	return false;
 }
 
-static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
-					     struct sock *sk)
+static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
 {
 	struct sock_filter *old_prog;
-	struct sk_filter *old_fp;
+	struct bpf_prog *old_fp;
 	int err, new_len, old_len = fp->len;
 
 	/* We are free to overwrite insns et al right here as it
@@ -932,13 +927,13 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
 	}
 
 	/* 1st pass: calculate the new program length. */
-	err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
+	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
 	if (err)
 		goto out_err_free;
 
 	/* Expand fp for appending the new filter representation. */
 	old_fp = fp;
-	fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
+	fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL);
 	if (!fp) {
 		/* The old_fp is still around in case we couldn't
 		 * allocate new memory, so uncharge on that one.
@@ -951,16 +946,16 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
 	fp->len = new_len;
 
 	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
-	err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
+	err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
 	if (err)
-		/* 2nd sk_convert_filter() can fail only if it fails
+		/* 2nd bpf_convert_filter() can fail only if it fails
 		 * to allocate memory, remapping must succeed. Note,
 		 * that at this time old_fp has already been released
-		 * by __sk_migrate_realloc().
+		 * by krealloc().
 		 */
 		goto out_err_free;
 
-	sk_filter_select_runtime(fp);
+	bpf_prog_select_runtime(fp);
 
 	kfree(old_prog);
 	return fp;
@@ -968,28 +963,20 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
 out_err_free:
 	kfree(old_prog);
 out_err:
-	/* Rollback filter setup. */
-	if (sk != NULL)
-		sk_filter_uncharge(sk, fp);
-	else
-		kfree(fp);
+	__bpf_prog_release(fp);
 	return ERR_PTR(err);
 }
 
-static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
-					     struct sock *sk)
+static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
 {
 	int err;
 
 	fp->bpf_func = NULL;
 	fp->jited = 0;
 
-	err = sk_chk_filter(fp->insns, fp->len);
+	err = bpf_check_classic(fp->insns, fp->len);
 	if (err) {
-		if (sk != NULL)
-			sk_filter_uncharge(sk, fp);
-		else
-			kfree(fp);
+		__bpf_prog_release(fp);
 		return ERR_PTR(err);
 	}
 
@@ -1002,13 +989,13 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 	 * internal BPF translation for the optimized interpreter.
 	 */
 	if (!fp->jited)
-		fp = __sk_migrate_filter(fp, sk);
+		fp = bpf_migrate_filter(fp);
 
 	return fp;
 }
 
 /**
- * sk_unattached_filter_create - create an unattached filter
+ * bpf_prog_create - create an unattached filter
  * @pfp: the unattached filter that is created
  * @fprog: the filter program
  *
@@ -1017,23 +1004,21 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
  * If an error occurs or there is insufficient memory for the filter
  * a negative errno code is returned. On success the return is zero.
  */
-int sk_unattached_filter_create(struct sk_filter **pfp,
-				struct sock_fprog_kern *fprog)
+int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
 {
-	unsigned int fsize = sk_filter_proglen(fprog);
-	struct sk_filter *fp;
+	unsigned int fsize = bpf_classic_proglen(fprog);
+	struct bpf_prog *fp;
 
 	/* Make sure new filter is there and in the right amounts. */
 	if (fprog->filter == NULL)
 		return -EINVAL;
 
-	fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
+	fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL);
 	if (!fp)
 		return -ENOMEM;
 
 	memcpy(fp->insns, fprog->filter, fsize);
 
-	atomic_set(&fp->refcnt, 1);
 	fp->len = fprog->len;
 	/* Since unattached filters are not copied back to user
 	 * space through sk_get_filter(), we do not need to hold
@@ -1041,23 +1026,23 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
 	 */
 	fp->orig_prog = NULL;
 
-	/* __sk_prepare_filter() already takes care of uncharging
+	/* bpf_prepare_filter() already takes care of freeing
 	 * memory in case something goes wrong.
 	 */
-	fp = __sk_prepare_filter(fp, NULL);
+	fp = bpf_prepare_filter(fp);
 	if (IS_ERR(fp))
 		return PTR_ERR(fp);
 
 	*pfp = fp;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
+EXPORT_SYMBOL_GPL(bpf_prog_create);
 
-void sk_unattached_filter_destroy(struct sk_filter *fp)
+void bpf_prog_destroy(struct bpf_prog *fp)
 {
-	__sk_filter_release(fp);
+	__bpf_prog_release(fp);
 }
-EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
+EXPORT_SYMBOL_GPL(bpf_prog_destroy);
 
 /**
  * sk_attach_filter - attach a socket filter
@@ -1072,8 +1057,9 @@ EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
 	struct sk_filter *fp, *old_fp;
-	unsigned int fsize = sk_filter_proglen(fprog);
-	unsigned int sk_fsize = sk_filter_size(fprog->len);
+	unsigned int fsize = bpf_classic_proglen(fprog);
+	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
+	struct bpf_prog *prog;
 	int err;
 
 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -1083,30 +1069,43 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	if (fprog->filter == NULL)
 		return -EINVAL;
 
-	fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
-	if (!fp)
+	prog = kmalloc(bpf_fsize, GFP_KERNEL);
+	if (!prog)
 		return -ENOMEM;
 
-	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
-		sock_kfree_s(sk, fp, sk_fsize);
+	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
+		kfree(prog);
 		return -EFAULT;
 	}
 
-	atomic_set(&fp->refcnt, 1);
-	fp->len = fprog->len;
+	prog->len = fprog->len;
 
-	err = sk_store_orig_filter(fp, fprog);
+	err = bpf_prog_store_orig_filter(prog, fprog);
 	if (err) {
-		sk_filter_uncharge(sk, fp);
+		kfree(prog);
 		return -ENOMEM;
 	}
 
-	/* __sk_prepare_filter() already takes care of uncharging
+	/* bpf_prepare_filter() already takes care of freeing
 	 * memory in case something goes wrong.
 	 */
-	fp = __sk_prepare_filter(fp, sk);
-	if (IS_ERR(fp))
-		return PTR_ERR(fp);
+	prog = bpf_prepare_filter(prog);
+	if (IS_ERR(prog))
+		return PTR_ERR(prog);
+
+	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+	if (!fp) {
+		__bpf_prog_release(prog);
+		return -ENOMEM;
+	}
+	fp->prog = prog;
+
+	atomic_set(&fp->refcnt, 0);
+
+	if (!sk_filter_charge(sk, fp)) {
+		__sk_filter_release(fp);
+		return -ENOMEM;
+	}
 
 	old_fp = rcu_dereference_protected(sk->sk_filter,
 					   sock_owned_by_user(sk));
@@ -1155,7 +1154,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
 	/* We're copying the filter that has been originally attached,
 	 * so no conversion/decode needed anymore.
 	 */
-	fprog = filter->orig_prog;
+	fprog = filter->prog->orig_prog;
 
 	ret = fprog->len;
 	if (!len)
@@ -1167,7 +1166,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
 		goto out;
 
 	ret = -EFAULT;
-	if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
+	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
 		goto out;
 
 	/* Instead of bytes, the API requests to return the number
diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
index 12ab7b4be609..4eab4a94a59d 100644
--- a/net/core/ptp_classifier.c
+++ b/net/core/ptp_classifier.c
@@ -107,11 +107,11 @@
 #include <linux/filter.h>
 #include <linux/ptp_classify.h>
 
-static struct sk_filter *ptp_insns __read_mostly;
+static struct bpf_prog *ptp_insns __read_mostly;
 
 unsigned int ptp_classify_raw(const struct sk_buff *skb)
 {
-	return SK_RUN_FILTER(ptp_insns, skb);
+	return BPF_PROG_RUN(ptp_insns, skb);
 }
 EXPORT_SYMBOL_GPL(ptp_classify_raw);
 
@@ -189,5 +189,5 @@ void __init ptp_classifier_init(void)
 		.len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
 	};
 
-	BUG_ON(sk_unattached_filter_create(&ptp_insns, &ptp_prog));
+	BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 134291d73fcd..a741163568fa 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1474,6 +1474,7 @@ static void sk_update_clone(const struct sock *sk, struct sock *newsk)
 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 {
 	struct sock *newsk;
+	bool is_charged = true;
 
 	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
 	if (newsk != NULL) {
@@ -1518,9 +1519,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 
 		filter = rcu_dereference_protected(newsk->sk_filter, 1);
 		if (filter != NULL)
-			sk_filter_charge(newsk, filter);
+			/* though it's an empty new sock, the charging may fail
+			 * if sysctl_optmem_max was changed between creation of
+			 * original socket and cloning
+			 */
+			is_charged = sk_filter_charge(newsk, filter);
 
-		if (unlikely(xfrm_sk_clone_policy(newsk))) {
+		if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
 			/* It is still raw copy of parent, so invalidate
 			 * destructor and make plain sk_free() */
 			newsk->sk_destruct = NULL;
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index a4216a4c9572..ad704c757bb4 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -68,8 +68,8 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
 	if (!filter)
 		goto out;
 
-	fprog = filter->orig_prog;
-	flen = sk_filter_proglen(fprog);
+	fprog = filter->prog->orig_prog;
+	flen = bpf_classic_proglen(fprog);
 
 	attr = nla_reserve(skb, attrtype, flen);
 	if (attr == NULL) {
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index bbffdbdaf603..dffee9d47ec4 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -28,7 +28,7 @@ static int bpf_mt_check(const struct xt_mtchk_param *par)
 	program.len = info->bpf_program_num_elem;
 	program.filter = info->bpf_program;
 
-	if (sk_unattached_filter_create(&info->filter, &program)) {
+	if (bpf_prog_create(&info->filter, &program)) {
 		pr_info("bpf: check failed: parse error\n");
 		return -EINVAL;
 	}
@@ -40,13 +40,13 @@ static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_bpf_info *info = par->matchinfo;
 
-	return SK_RUN_FILTER(info->filter, skb);
+	return BPF_PROG_RUN(info->filter, skb);
 }
 
 static void bpf_mt_destroy(const struct xt_mtdtor_param *par)
 {
 	const struct xt_bpf_info *info = par->matchinfo;
-	sk_unattached_filter_destroy(info->filter);
+	bpf_prog_destroy(info->filter);
 }
 
 static struct xt_match bpf_mt_reg __read_mostly = {
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 13f64df2c710..0e30d58149da 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -30,7 +30,7 @@ struct cls_bpf_head {
 };
 
 struct cls_bpf_prog {
-	struct sk_filter *filter;
+	struct bpf_prog *filter;
 	struct sock_filter *bpf_ops;
 	struct tcf_exts exts;
 	struct tcf_result res;
@@ -54,7 +54,7 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 	int ret;
 
 	list_for_each_entry(prog, &head->plist, link) {
-		int filter_res = SK_RUN_FILTER(prog->filter, skb);
+		int filter_res = BPF_PROG_RUN(prog->filter, skb);
 
 		if (filter_res == 0)
 			continue;
@@ -92,7 +92,7 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
 	tcf_unbind_filter(tp, &prog->res);
 	tcf_exts_destroy(tp, &prog->exts);
 
-	sk_unattached_filter_destroy(prog->filter);
+	bpf_prog_destroy(prog->filter);
 
 	kfree(prog->bpf_ops);
 	kfree(prog);
@@ -161,7 +161,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	struct sock_filter *bpf_ops, *bpf_old;
 	struct tcf_exts exts;
 	struct sock_fprog_kern tmp;
-	struct sk_filter *fp, *fp_old;
+	struct bpf_prog *fp, *fp_old;
 	u16 bpf_size, bpf_len;
 	u32 classid;
 	int ret;
@@ -193,7 +193,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	tmp.len = bpf_len;
 	tmp.filter = bpf_ops;
 
-	ret = sk_unattached_filter_create(&fp, &tmp);
+	ret = bpf_prog_create(&fp, &tmp);
 	if (ret)
 		goto errout_free;
 
@@ -211,7 +211,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	tcf_exts_change(tp, &prog->exts, &exts);
 
 	if (fp_old)
-		sk_unattached_filter_destroy(fp_old);
+		bpf_prog_destroy(fp_old);
 	if (bpf_old)
 		kfree(bpf_old);
 