path: root/lib/test_bpf.c
author		Alexei Starovoitov <ast@plumgrid.com>	2014-07-30 23:34:16 -0400
committer	David S. Miller <davem@davemloft.net>	2014-08-02 18:03:58 -0400
commit		7ae457c1e5b45a1b826fad9d62b32191d2bdcfdb (patch)
tree		dcb1aba57530e6c9426a81758173ca146ffafcaf /lib/test_bpf.c
parent		8fb575ca396bc31d9fa99c26336e2432b41d1bfc (diff)
net: filter: split 'struct sk_filter' into socket and bpf parts
clean up names related to socket filtering and bpf in the following way:
- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix

split 'struct sk_filter' into

	struct sk_filter {
		atomic_t	refcnt;
		struct rcu_head	rcu;
		struct bpf_prog	*prog;
	};

and

	struct bpf_prog {
		u32			jited:1,
					len:31;
		struct sock_fprog_kern	*orig_prog;
		unsigned int		(*bpf_func)(const struct sk_buff *skb,
						    const struct bpf_insn *filter);
		union {
			struct sock_filter	insns[0];
			struct bpf_insn		insnsi[0];
			struct work_struct	work;
		};
	};

so that 'struct bpf_prog' can be used independent of sockets and cleans up
'unattached' bpf use cases

split SK_RUN_FILTER macro into:
    SK_RUN_FILTER to be used with 'struct sk_filter *' and
    BPF_PROG_RUN to be used with 'struct bpf_prog *'

__sk_filter_release(struct sk_filter *) gains
__bpf_prog_release(struct bpf_prog *) helper function

also perform related renames for the functions that work
with 'struct bpf_prog *', since they're on the same lines:

sk_filter_size -> bpf_prog_size
sk_filter_select_runtime -> bpf_prog_select_runtime
sk_filter_free -> bpf_prog_free
sk_unattached_filter_create -> bpf_prog_create
sk_unattached_filter_destroy -> bpf_prog_destroy
sk_store_orig_filter -> bpf_prog_store_orig_filter
sk_release_orig_filter -> bpf_release_orig_filter
__sk_migrate_filter -> bpf_migrate_filter
__sk_prepare_filter -> bpf_prepare_filter

API for attaching classic BPF to a socket stays the same:
sk_attach_filter(prog, struct sock *)/sk_detach_filter(struct sock *)
and SK_RUN_FILTER(struct sk_filter *, ctx) to execute a program
which is used by sockets, tun, af_packet

API for 'unattached' BPF programs becomes:
bpf_prog_create(struct bpf_prog **)/bpf_prog_destroy(struct bpf_prog *)
and BPF_PROG_RUN(struct bpf_prog *, ctx) to execute a program
which is used by isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
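For reference, a minimal sketch of the renamed 'unattached' API as a caller
would use it after this patch (error handling trimmed; the two-instruction
classic filter and the 'skb' context pointer are illustrative placeholders,
not taken from this commit):

	struct sock_filter insns[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),	/* A = skb->len */
		BPF_STMT(BPF_RET | BPF_A, 0),		/* return A */
	};
	struct sock_fprog_kern fprog = {
		.len	= ARRAY_SIZE(insns),
		.filter	= insns,
	};
	struct bpf_prog *prog;
	int err;

	err = bpf_prog_create(&prog, &fprog);		/* was sk_unattached_filter_create() */
	if (!err) {
		u32 ret = BPF_PROG_RUN(prog, skb);	/* was SK_RUN_FILTER() */
		bpf_prog_destroy(prog);			/* was sk_unattached_filter_destroy() */
	}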
Diffstat (limited to 'lib/test_bpf.c')
-rw-r--r--	lib/test_bpf.c	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 5f48623ee1a7..89e0345733bd 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1761,9 +1761,9 @@ static int probe_filter_length(struct sock_filter *fp)
 	return len + 1;
 }
 
-static struct sk_filter *generate_filter(int which, int *err)
+static struct bpf_prog *generate_filter(int which, int *err)
 {
-	struct sk_filter *fp;
+	struct bpf_prog *fp;
 	struct sock_fprog_kern fprog;
 	unsigned int flen = probe_filter_length(tests[which].u.insns);
 	__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
@@ -1773,7 +1773,7 @@ static struct sk_filter *generate_filter(int which, int *err)
 	fprog.filter = tests[which].u.insns;
 	fprog.len = flen;
 
-	*err = sk_unattached_filter_create(&fp, &fprog);
+	*err = bpf_prog_create(&fp, &fprog);
 	if (tests[which].aux & FLAG_EXPECTED_FAIL) {
 		if (*err == -EINVAL) {
 			pr_cont("PASS\n");
@@ -1798,7 +1798,7 @@ static struct sk_filter *generate_filter(int which, int *err)
 		break;
 
 	case INTERNAL:
-		fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
+		fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
 		if (fp == NULL) {
 			pr_cont("UNEXPECTED_FAIL no memory left\n");
 			*err = -ENOMEM;
@@ -1809,7 +1809,7 @@ static struct sk_filter *generate_filter(int which, int *err)
 		memcpy(fp->insnsi, tests[which].u.insns_int,
 		       fp->len * sizeof(struct bpf_insn));
 
-		sk_filter_select_runtime(fp);
+		bpf_prog_select_runtime(fp);
 		break;
 	}
 
@@ -1817,21 +1817,21 @@ static struct sk_filter *generate_filter(int which, int *err)
 	return fp;
 }
 
-static void release_filter(struct sk_filter *fp, int which)
+static void release_filter(struct bpf_prog *fp, int which)
 {
 	__u8 test_type = tests[which].aux & TEST_TYPE_MASK;
 
 	switch (test_type) {
 	case CLASSIC:
-		sk_unattached_filter_destroy(fp);
+		bpf_prog_destroy(fp);
 		break;
 	case INTERNAL:
-		sk_filter_free(fp);
+		bpf_prog_free(fp);
 		break;
 	}
 }
 
-static int __run_one(const struct sk_filter *fp, const void *data,
+static int __run_one(const struct bpf_prog *fp, const void *data,
 		     int runs, u64 *duration)
 {
 	u64 start, finish;
@@ -1840,7 +1840,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
 	start = ktime_to_us(ktime_get());
 
 	for (i = 0; i < runs; i++)
-		ret = SK_RUN_FILTER(fp, data);
+		ret = BPF_PROG_RUN(fp, data);
 
 	finish = ktime_to_us(ktime_get());
 
@@ -1850,7 +1850,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
 	return ret;
 }
 
-static int run_one(const struct sk_filter *fp, struct bpf_test *test)
+static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
 {
 	int err_cnt = 0, i, runs = MAX_TESTRUNS;
 
@@ -1884,7 +1884,7 @@ static __init int test_bpf(void)
 	int i, err_cnt = 0, pass_cnt = 0;
 
 	for (i = 0; i < ARRAY_SIZE(tests); i++) {
-		struct sk_filter *fp;
+		struct bpf_prog *fp;
 		int err;
 
 		pr_info("#%d %s ", i, tests[i].descr);