author	Alexei Starovoitov <ast@plumgrid.com>	2014-07-30 23:34:16 -0400
committer	David S. Miller <davem@davemloft.net>	2014-08-02 18:03:58 -0400
commit	7ae457c1e5b45a1b826fad9d62b32191d2bdcfdb (patch)
tree	dcb1aba57530e6c9426a81758173ca146ffafcaf /net
parent	8fb575ca396bc31d9fa99c26336e2432b41d1bfc (diff)
net: filter: split 'struct sk_filter' into socket and bpf parts
clean up names related to socket filtering and BPF in the following way:
- everything that deals with sockets keeps the 'sk_*' prefix
- everything that is pure BPF is changed to the 'bpf_*' prefix

split 'struct sk_filter' into

	struct sk_filter {
		atomic_t	refcnt;
		struct rcu_head	rcu;
		struct bpf_prog	*prog;
	};

and

	struct bpf_prog {
		u32			jited:1,
					len:31;
		struct sock_fprog_kern	*orig_prog;
		unsigned int		(*bpf_func)(const struct sk_buff *skb,
						    const struct bpf_insn *filter);
		union {
			struct sock_filter	insns[0];
			struct bpf_insn		insnsi[0];
			struct work_struct	work;
		};
	};

so that 'struct bpf_prog' can be used independent of sockets, which cleans
up the 'unattached' BPF use cases.

split the SK_RUN_FILTER macro into:
- SK_RUN_FILTER, to be used with 'struct sk_filter *'
- BPF_PROG_RUN, to be used with 'struct bpf_prog *'

__sk_filter_release(struct sk_filter *) gains a
__bpf_prog_release(struct bpf_prog *) helper function.

also perform the related renames for the functions that work with
'struct bpf_prog *', since they are changed on the same lines:

	sk_filter_size               -> bpf_prog_size
	sk_filter_select_runtime     -> bpf_prog_select_runtime
	sk_filter_free               -> bpf_prog_free
	sk_unattached_filter_create  -> bpf_prog_create
	sk_unattached_filter_destroy -> bpf_prog_destroy
	sk_store_orig_filter         -> bpf_prog_store_orig_filter
	sk_release_orig_filter       -> bpf_release_orig_filter
	__sk_migrate_filter          -> bpf_migrate_filter
	__sk_prepare_filter          -> bpf_prepare_filter

The API for attaching classic BPF to a socket stays the same:
sk_attach_filter(prog, struct sock *)/sk_detach_filter(struct sock *)
and SK_RUN_FILTER(struct sk_filter *, ctx) to execute a program,
which is used by sockets, tun, af_packet.

The API for 'unattached' BPF programs becomes:
bpf_prog_create(struct bpf_prog **)/bpf_prog_destroy(struct bpf_prog *)
and BPF_PROG_RUN(struct bpf_prog *, ctx) to execute a program,
which is used by isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf.

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
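For reference, here is the 'unattached' API end to end: a minimal sketch
modeled on the ptp_classifier and xt_bpf call sites in the diff below. The
demo_* names and the trivial one-instruction filter are illustrative only,
not part of the patch:

	#include <linux/filter.h>

	/* classic BPF program that returns 0 (no match) for every packet */
	static struct sock_filter demo_insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0),
	};

	static struct bpf_prog *demo_prog;

	static int demo_init(void)
	{
		struct sock_fprog_kern fprog = {
			.len	= ARRAY_SIZE(demo_insns),
			.filter	= demo_insns,
		};

		/* checks the classic program, converts it and selects a runtime */
		return bpf_prog_create(&demo_prog, &fprog);
	}

	static unsigned int demo_run(const struct sk_buff *skb)
	{
		/* no sk_filter wrapper or refcounting needed on this path */
		return BPF_PROG_RUN(demo_prog, skb);
	}

	static void demo_exit(void)
	{
		bpf_prog_destroy(demo_prog);
	}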
Diffstat (limited to 'net')
-rw-r--r--	net/core/filter.c	92
-rw-r--r--	net/core/ptp_classifier.c	6
-rw-r--r--	net/core/sock_diag.c	2
-rw-r--r--	net/netfilter/xt_bpf.c	6
-rw-r--r--	net/sched/cls_bpf.c	12
5 files changed, 65 insertions(+), 53 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 6ac901613bee..d814b8a89d0f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -810,8 +810,8 @@ int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
 }
 EXPORT_SYMBOL(bpf_check_classic);
 
-static int sk_store_orig_filter(struct sk_filter *fp,
-				const struct sock_fprog *fprog)
+static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
+				       const struct sock_fprog *fprog)
 {
 	unsigned int fsize = bpf_classic_proglen(fprog);
 	struct sock_fprog_kern *fkprog;
@@ -831,7 +831,7 @@ static int sk_store_orig_filter(struct sk_filter *fp,
 	return 0;
 }
 
-static void sk_release_orig_filter(struct sk_filter *fp)
+static void bpf_release_orig_filter(struct bpf_prog *fp)
 {
 	struct sock_fprog_kern *fprog = fp->orig_prog;
 
@@ -841,10 +841,16 @@ static void sk_release_orig_filter(struct sk_filter *fp)
 	}
 }
 
+static void __bpf_prog_release(struct bpf_prog *prog)
+{
+	bpf_release_orig_filter(prog);
+	bpf_prog_free(prog);
+}
+
 static void __sk_filter_release(struct sk_filter *fp)
 {
-	sk_release_orig_filter(fp);
-	sk_filter_free(fp);
+	__bpf_prog_release(fp->prog);
+	kfree(fp);
 }
 
 /**
@@ -872,7 +878,7 @@ static void sk_filter_release(struct sk_filter *fp)
 
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 {
-	u32 filter_size = sk_filter_size(fp->len);
+	u32 filter_size = bpf_prog_size(fp->prog->len);
 
 	atomic_sub(filter_size, &sk->sk_omem_alloc);
 	sk_filter_release(fp);
@@ -883,7 +889,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
  */
 bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
-	u32 filter_size = sk_filter_size(fp->len);
+	u32 filter_size = bpf_prog_size(fp->prog->len);
 
 	/* same check as in sock_kmalloc() */
 	if (filter_size <= sysctl_optmem_max &&
@@ -895,10 +901,10 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 	return false;
 }
 
-static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp)
+static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
 {
 	struct sock_filter *old_prog;
-	struct sk_filter *old_fp;
+	struct bpf_prog *old_fp;
 	int err, new_len, old_len = fp->len;
 
 	/* We are free to overwrite insns et al right here as it
@@ -927,7 +933,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp)
 
 	/* Expand fp for appending the new filter representation. */
 	old_fp = fp;
-	fp = krealloc(old_fp, sk_filter_size(new_len), GFP_KERNEL);
+	fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL);
 	if (!fp) {
 		/* The old_fp is still around in case we couldn't
 		 * allocate new memory, so uncharge on that one.
@@ -949,7 +955,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp)
 		 */
 		goto out_err_free;
 
-	sk_filter_select_runtime(fp);
+	bpf_prog_select_runtime(fp);
 
 	kfree(old_prog);
 	return fp;
@@ -957,11 +963,11 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp)
 out_err_free:
 	kfree(old_prog);
 out_err:
-	__sk_filter_release(fp);
+	__bpf_prog_release(fp);
 	return ERR_PTR(err);
 }
 
-static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp)
+static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
 {
 	int err;
 
@@ -970,7 +976,7 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp)
 
 	err = bpf_check_classic(fp->insns, fp->len);
 	if (err) {
-		__sk_filter_release(fp);
+		__bpf_prog_release(fp);
 		return ERR_PTR(err);
 	}
 
@@ -983,13 +989,13 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp)
 	 * internal BPF translation for the optimized interpreter.
 	 */
 	if (!fp->jited)
-		fp = __sk_migrate_filter(fp);
+		fp = bpf_migrate_filter(fp);
 
 	return fp;
 }
 
 /**
- *	sk_unattached_filter_create - create an unattached filter
+ *	bpf_prog_create - create an unattached filter
  *	@pfp: the unattached filter that is created
  *	@fprog: the filter program
  *
@@ -998,23 +1004,21 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp)
  *	If an error occurs or there is insufficient memory for the filter
  *	a negative errno code is returned. On success the return is zero.
  */
-int sk_unattached_filter_create(struct sk_filter **pfp,
-				struct sock_fprog_kern *fprog)
+int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
 {
 	unsigned int fsize = bpf_classic_proglen(fprog);
-	struct sk_filter *fp;
+	struct bpf_prog *fp;
 
 	/* Make sure new filter is there and in the right amounts. */
 	if (fprog->filter == NULL)
 		return -EINVAL;
 
-	fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
+	fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL);
 	if (!fp)
 		return -ENOMEM;
 
 	memcpy(fp->insns, fprog->filter, fsize);
 
-	atomic_set(&fp->refcnt, 1);
 	fp->len = fprog->len;
 	/* Since unattached filters are not copied back to user
 	 * space through sk_get_filter(), we do not need to hold
@@ -1022,23 +1026,23 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
 	 */
 	fp->orig_prog = NULL;
 
-	/* __sk_prepare_filter() already takes care of freeing
+	/* bpf_prepare_filter() already takes care of freeing
 	 * memory in case something goes wrong.
 	 */
-	fp = __sk_prepare_filter(fp);
+	fp = bpf_prepare_filter(fp);
 	if (IS_ERR(fp))
 		return PTR_ERR(fp);
 
 	*pfp = fp;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
+EXPORT_SYMBOL_GPL(bpf_prog_create);
 
-void sk_unattached_filter_destroy(struct sk_filter *fp)
+void bpf_prog_destroy(struct bpf_prog *fp)
 {
-	__sk_filter_release(fp);
+	__bpf_prog_release(fp);
 }
-EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
+EXPORT_SYMBOL_GPL(bpf_prog_destroy);
 
 /**
  *	sk_attach_filter - attach a socket filter
@@ -1054,7 +1058,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
 	struct sk_filter *fp, *old_fp;
 	unsigned int fsize = bpf_classic_proglen(fprog);
-	unsigned int sk_fsize = sk_filter_size(fprog->len);
+	unsigned int bpf_fsize = bpf_prog_size(fprog->len);
+	struct bpf_prog *prog;
 	int err;
 
 	if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -1064,29 +1069,36 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 	if (fprog->filter == NULL)
 		return -EINVAL;
 
-	fp = kmalloc(sk_fsize, GFP_KERNEL);
-	if (!fp)
+	prog = kmalloc(bpf_fsize, GFP_KERNEL);
+	if (!prog)
 		return -ENOMEM;
 
-	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
-		kfree(fp);
+	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
+		kfree(prog);
 		return -EFAULT;
 	}
 
-	fp->len = fprog->len;
+	prog->len = fprog->len;
 
-	err = sk_store_orig_filter(fp, fprog);
+	err = bpf_prog_store_orig_filter(prog, fprog);
 	if (err) {
-		kfree(fp);
+		kfree(prog);
 		return -ENOMEM;
 	}
 
-	/* __sk_prepare_filter() already takes care of freeing
+	/* bpf_prepare_filter() already takes care of freeing
 	 * memory in case something goes wrong.
 	 */
-	fp = __sk_prepare_filter(fp);
-	if (IS_ERR(fp))
-		return PTR_ERR(fp);
+	prog = bpf_prepare_filter(prog);
+	if (IS_ERR(prog))
+		return PTR_ERR(prog);
+
+	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+	if (!fp) {
+		__bpf_prog_release(prog);
+		return -ENOMEM;
+	}
+	fp->prog = prog;
 
 	atomic_set(&fp->refcnt, 0);
 
@@ -1142,7 +1154,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
 	/* We're copying the filter that has been originally attached,
 	 * so no conversion/decode needed anymore.
 	 */
-	fprog = filter->orig_prog;
+	fprog = filter->prog->orig_prog;
 
 	ret = fprog->len;
 	if (!len)
diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
index 12ab7b4be609..4eab4a94a59d 100644
--- a/net/core/ptp_classifier.c
+++ b/net/core/ptp_classifier.c
@@ -107,11 +107,11 @@
 #include <linux/filter.h>
 #include <linux/ptp_classify.h>
 
-static struct sk_filter *ptp_insns __read_mostly;
+static struct bpf_prog *ptp_insns __read_mostly;
 
 unsigned int ptp_classify_raw(const struct sk_buff *skb)
 {
-	return SK_RUN_FILTER(ptp_insns, skb);
+	return BPF_PROG_RUN(ptp_insns, skb);
 }
 EXPORT_SYMBOL_GPL(ptp_classify_raw);
 
@@ -189,5 +189,5 @@ void __init ptp_classifier_init(void)
 		.len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
 	};
 
-	BUG_ON(sk_unattached_filter_create(&ptp_insns, &ptp_prog));
+	BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
 }
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 57d922320c59..ad704c757bb4 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -68,7 +68,7 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
 	if (!filter)
 		goto out;
 
-	fprog = filter->orig_prog;
+	fprog = filter->prog->orig_prog;
 	flen = bpf_classic_proglen(fprog);
 
 	attr = nla_reserve(skb, attrtype, flen);
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c
index bbffdbdaf603..dffee9d47ec4 100644
--- a/net/netfilter/xt_bpf.c
+++ b/net/netfilter/xt_bpf.c
@@ -28,7 +28,7 @@ static int bpf_mt_check(const struct xt_mtchk_param *par)
 	program.len = info->bpf_program_num_elem;
 	program.filter = info->bpf_program;
 
-	if (sk_unattached_filter_create(&info->filter, &program)) {
+	if (bpf_prog_create(&info->filter, &program)) {
 		pr_info("bpf: check failed: parse error\n");
 		return -EINVAL;
 	}
@@ -40,13 +40,13 @@ static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_bpf_info *info = par->matchinfo;
 
-	return SK_RUN_FILTER(info->filter, skb);
+	return BPF_PROG_RUN(info->filter, skb);
 }
 
 static void bpf_mt_destroy(const struct xt_mtdtor_param *par)
 {
 	const struct xt_bpf_info *info = par->matchinfo;
-	sk_unattached_filter_destroy(info->filter);
+	bpf_prog_destroy(info->filter);
 }
 
 static struct xt_match bpf_mt_reg __read_mostly = {
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 13f64df2c710..0e30d58149da 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -30,7 +30,7 @@ struct cls_bpf_head {
 };
 
 struct cls_bpf_prog {
-	struct sk_filter *filter;
+	struct bpf_prog *filter;
 	struct sock_filter *bpf_ops;
 	struct tcf_exts exts;
 	struct tcf_result res;
@@ -54,7 +54,7 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 	int ret;
 
 	list_for_each_entry(prog, &head->plist, link) {
-		int filter_res = SK_RUN_FILTER(prog->filter, skb);
+		int filter_res = BPF_PROG_RUN(prog->filter, skb);
 
 		if (filter_res == 0)
 			continue;
@@ -92,7 +92,7 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
 	tcf_unbind_filter(tp, &prog->res);
 	tcf_exts_destroy(tp, &prog->exts);
 
-	sk_unattached_filter_destroy(prog->filter);
+	bpf_prog_destroy(prog->filter);
 
 	kfree(prog->bpf_ops);
 	kfree(prog);
@@ -161,7 +161,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	struct sock_filter *bpf_ops, *bpf_old;
 	struct tcf_exts exts;
 	struct sock_fprog_kern tmp;
-	struct sk_filter *fp, *fp_old;
+	struct bpf_prog *fp, *fp_old;
 	u16 bpf_size, bpf_len;
 	u32 classid;
 	int ret;
@@ -193,7 +193,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	tmp.len = bpf_len;
 	tmp.filter = bpf_ops;
 
-	ret = sk_unattached_filter_create(&fp, &tmp);
+	ret = bpf_prog_create(&fp, &tmp);
 	if (ret)
 		goto errout_free;
 
@@ -211,7 +211,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
 	tcf_exts_change(tp, &prog->exts, &exts);
 
 	if (fp_old)
-		sk_unattached_filter_destroy(fp_old);
+		bpf_prog_destroy(fp_old);
 	if (bpf_old)
 		kfree(bpf_old);
 