author		Daniel Borkmann <daniel@iogearbox.net>	2018-10-12 20:45:57 -0400
committer	Alexei Starovoitov <ast@kernel.org>	2018-10-15 15:23:19 -0400
commit		1243a51f6c05ecbb2c5c9e02fdcc1e7a06f76f26 (patch)
tree		702caa53bfd6a96779e5631cc654dc8acebea858 /kernel/bpf/sockmap.c
parent		8b9088f806e1ccd10c3d48b3b6d3d5d7855d92c5 (diff)
tcp, ulp: remove ulp bits from sockmap
In order to prepare sockmap logic to be used in combination with kTLS
we need to detangle it from ULP, and further split it in later commits
into a generic API.

Joint work with John.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
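The shape of the change, in short: sockmap no longer registers a tcp_ulp_ops table and reaches its init/release handlers through the ULP layer; after this patch it calls bpf_tcp_init()/bpf_tcp_release() directly and builds its proto templates once at boot via core_initcall(), as the diff below shows. The stand-alone C sketch here mirrors that before/after calling pattern with placeholder bodies; the names echo the kernel ones, but it is an illustration only, not kernel code.

/* Stand-alone sketch of the calling-pattern change in this patch.
 * Names echo the kernel ones; everything here is hypothetical
 * user-space code for illustration only.
 */
#include <stdio.h>

static int bpf_tcp_init(void)     { puts("init psock hooks");    return 0; }
static void bpf_tcp_release(void) { puts("release psock hooks"); }

/* Before: handlers reached indirectly through a registered ops table
 * (the kernel's struct tcp_ulp_ops), set up on map allocation. */
struct ulp_ops {
	int  (*init)(void);
	void (*release)(void);
};

static const struct ulp_ops bpf_tcp_ulp_ops = {
	.init    = bpf_tcp_init,
	.release = bpf_tcp_release,
};

int main(void)
{
	/* old flow: dispatch through the registered ops table */
	bpf_tcp_ulp_ops.init();
	bpf_tcp_ulp_ops.release();

	/* new flow after this patch: call the helpers directly,
	 * no ULP registration or lookup in between */
	bpf_tcp_init();
	bpf_tcp_release();
	return 0;
}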
Diffstat (limited to 'kernel/bpf/sockmap.c')
-rw-r--r--	kernel/bpf/sockmap.c	39
1 file changed, 10 insertions(+), 29 deletions(-)
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 5d0677d808ae..de6f7a65c72b 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -182,6 +182,7 @@ enum {
 static struct proto *saved_tcpv6_prot __read_mostly;
 static DEFINE_SPINLOCK(tcpv6_prot_lock);
 static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
+
 static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
 			 struct proto *base)
 {
@@ -239,6 +240,13 @@ static int bpf_tcp_init(struct sock *sk)
 	return 0;
 }
 
+static int __init bpf_sock_init(void)
+{
+	build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
+	return 0;
+}
+core_initcall(bpf_sock_init);
+
 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
 static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge);
 
@@ -413,15 +421,6 @@ enum __sk_action {
 	__SK_NONE,
 };
 
-static struct tcp_ulp_ops bpf_tcp_ulp_ops __read_mostly = {
-	.name		= "bpf_tcp",
-	.uid		= TCP_ULP_BPF,
-	.user_visible	= false,
-	.owner		= NULL,
-	.init		= bpf_tcp_init,
-	.release	= bpf_tcp_release,
-};
-
 static int memcopy_from_iter(struct sock *sk,
 			     struct sk_msg_buff *md,
 			     struct iov_iter *from, int bytes)
@@ -1236,16 +1235,6 @@ static void bpf_tcp_msg_add(struct smap_psock *psock,
 		bpf_prog_put(orig_tx_msg);
 }
 
-static int bpf_tcp_ulp_register(void)
-{
-	build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
-	/* Once BPF TX ULP is registered it is never unregistered. It
-	 * will be in the ULP list for the lifetime of the system. Doing
-	 * duplicate registers is not a problem.
-	 */
-	return tcp_register_ulp(&bpf_tcp_ulp_ops);
-}
-
 static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
 {
 	struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
@@ -1491,7 +1480,7 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
 {
 	if (refcount_dec_and_test(&psock->refcnt)) {
 		if (psock_is_smap_sk(sock))
-			tcp_cleanup_ulp(sock);
+			bpf_tcp_release(sock);
 		write_lock_bh(&sock->sk_callback_lock);
 		smap_stop_sock(psock, sock);
 		write_unlock_bh(&sock->sk_callback_lock);
@@ -1666,10 +1655,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 	    attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
 		return ERR_PTR(-EINVAL);
 
-	err = bpf_tcp_ulp_register();
-	if (err && err != -EEXIST)
-		return ERR_PTR(err);
-
 	stab = kzalloc(sizeof(*stab), GFP_USER);
 	if (!stab)
 		return ERR_PTR(-ENOMEM);
@@ -1951,7 +1936,7 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
 	if (tx_msg)
 		bpf_tcp_msg_add(psock, sock, tx_msg);
 	if (new) {
-		err = tcp_set_ulp_id(sock, TCP_ULP_BPF);
+		err = bpf_tcp_init(sock);
 		if (err)
 			goto out_free;
 	}
@@ -2187,10 +2172,6 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 		 */
 		return ERR_PTR(-E2BIG);
 
-	err = bpf_tcp_ulp_register();
-	if (err && err != -EEXIST)
-		return ERR_PTR(err);
-
 	htab = kzalloc(sizeof(*htab), GFP_USER);
 	if (!htab)
 		return ERR_PTR(-ENOMEM);