summary | refs | log | tree | commit | diff | stats
path: root/include/linux/bpf-cgroup.h
diff options
context:
space:
mode:
author	David Ahern <dsa@cumulusnetworks.com>	2016-12-01 11:48:04 -0500
committer	David S. Miller <davem@davemloft.net>	2016-12-02 13:46:08 -0500
commit	61023658760032e97869b07d54be9681d2529e77 (patch)
tree	9b10a9d2a8b5820450298f9bda8f3c23fbf66b57 /include/linux/bpf-cgroup.h
parent	b2cd12574aa3e1625f471ff57cde7f628a18a46b (diff)
bpf: Add new cgroup attach type to enable sock modifications
Add new cgroup based program type, BPF_PROG_TYPE_CGROUP_SOCK. Similar to
BPF_PROG_TYPE_CGROUP_SKB programs can be attached to a cgroup and run
any time a process in the cgroup opens an AF_INET or AF_INET6 socket.
Currently only sk_bound_dev_if is exported to userspace for modification
by a bpf program.

This allows a cgroup to be configured such that AF_INET{6} sockets opened
by processes are automatically bound to a specific device. In turn, this
enables the running of programs that do not support SO_BINDTODEVICE in a
specific VRF context / L3 domain.

Signed-off-by: David Ahern <dsa@cumulusnetworks.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux/bpf-cgroup.h')
-rw-r--r--	include/linux/bpf-cgroup.h	14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index af2ca8b432c0..7b6e5d168c95 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -40,6 +40,9 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
 				struct sk_buff *skb,
 				enum bpf_attach_type type);
 
+int __cgroup_bpf_run_filter_sk(struct sock *sk,
+			       enum bpf_attach_type type);
+
 /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
 ({									      \
@@ -63,6 +66,16 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
 	__ret;								      \
 })
 
+#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
+({									       \
+	int __ret = 0;							       \
+	if (cgroup_bpf_enabled && sk) {					       \
+		__ret = __cgroup_bpf_run_filter_sk(sk,			       \
+						 BPF_CGROUP_INET_SOCK_CREATE); \
+	}								       \
+	__ret;								       \
+})
+
 #else
 
 struct cgroup_bpf {};
@@ -72,6 +85,7 @@ static inline void cgroup_bpf_inherit(struct cgroup *cgrp,
 
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
 
 #endif /* CONFIG_CGROUP_BPF */
 