author     Alexei Starovoitov <ast@plumgrid.com>    2015-06-04 13:11:54 -0400
committer  David S. Miller <davem@davemloft.net>    2015-06-07 05:01:33 -0400
commit     d691f9e8d4405c334aa10d556e73c8bf44cb0e01 (patch)
tree       295b1d647364407c42990d916358a72381f4d534 /samples/bpf
parent     3431205e03977aaf32bce6d4b16fb8244b510056 (diff)
bpf: allow programs to write to certain skb fields
Allow programs to read and write the skb->mark and tc_index fields, as
well as ((struct qdisc_skb_cb *)cb)->data.

mark and tc_index are generically useful in TC.

cb[0]-cb[4] are primarily used to pass arguments from one program to
another called via bpf_tail_call(), as can be seen in the
sockex3_kern.c example.

All fields of 'struct __sk_buff' are readable by socket and tc_cls_act
programs. mark and tc_index are writeable from tc_cls_act programs
only. cb[0]-cb[4] are writeable by both socket and tc_cls_act programs.

Add verifier tests and improve sample code.
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
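
As context for the cb[] usage described above: the calling program stores a value in skb->cb[] before bpf_tail_call(), and the tail-called program reads it back, since both run against the same skb. Below is a minimal sketch of that pattern in the style of samples/bpf. It is not part of this patch; the map name, section names, and offset value are made up for illustration, and user space is assumed to populate the program array the way the sockex3 loader does.

```c
/* Hypothetical sketch: pass a parsing offset through skb->cb[0] across a
 * tail call. Assumes the samples/bpf build environment (bpf_helpers.h) and
 * that user space installs the "socket/1" program at index 1 of jmp_table.
 */
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") jmp_table = {
        .type = BPF_MAP_TYPE_PROG_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(__u32),
        .max_entries = 8,
};

SEC("socket/1")
int parse_step(struct __sk_buff *skb)
{
        __u32 nhoff = skb->cb[0];       /* argument written by the caller */

        /* ... continue parsing the packet at offset nhoff ... */
        return 0;
}

SEC("socket/0")
int entry(struct __sk_buff *skb)
{
        skb->cb[0] = 14;                /* e.g. skip the Ethernet header */
        bpf_tail_call(skb, &jmp_table, 1);
        return 0;                       /* only reached if the tail call fails */
}

char _license[] SEC("license") = "GPL";
```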
Diffstat (limited to 'samples/bpf')
 -rw-r--r--  samples/bpf/sockex3_kern.c  | 35
 -rw-r--r--  samples/bpf/test_verifier.c | 84
 2 files changed, 93 insertions(+), 26 deletions(-)
diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c
index 2625b987944f..41ae2fd21b13 100644
--- a/samples/bpf/sockex3_kern.c
+++ b/samples/bpf/sockex3_kern.c
@@ -89,7 +89,6 @@ static inline __u32 ipv6_addr_hash(struct __sk_buff *ctx, __u64 off)
 
 struct globals {
         struct flow_keys flow;
-        __u32 nhoff;
 };
 
 struct bpf_map_def SEC("maps") percpu_map = {
@@ -139,7 +138,7 @@ static void update_stats(struct __sk_buff *skb, struct globals *g)
 static __always_inline void parse_ip_proto(struct __sk_buff *skb,
                                            struct globals *g, __u32 ip_proto)
 {
-        __u32 nhoff = g->nhoff;
+        __u32 nhoff = skb->cb[0];
         int poff;
 
         switch (ip_proto) {
@@ -165,7 +164,7 @@ static __always_inline void parse_ip_proto(struct __sk_buff *skb,
                 if (gre_flags & GRE_SEQ)
                         nhoff += 4;
 
-                g->nhoff = nhoff;
+                skb->cb[0] = nhoff;
                 parse_eth_proto(skb, gre_proto);
                 break;
         }
@@ -195,7 +194,7 @@ PROG(PARSE_IP)(struct __sk_buff *skb)
         if (!g)
                 return 0;
 
-        nhoff = g->nhoff;
+        nhoff = skb->cb[0];
 
         if (unlikely(ip_is_fragment(skb, nhoff)))
                 return 0;
@@ -210,7 +209,7 @@ PROG(PARSE_IP)(struct __sk_buff *skb)
         verlen = load_byte(skb, nhoff + 0/*offsetof(struct iphdr, ihl)*/);
         nhoff += (verlen & 0xF) << 2;
 
-        g->nhoff = nhoff;
+        skb->cb[0] = nhoff;
         parse_ip_proto(skb, g, ip_proto);
         return 0;
 }
@@ -223,7 +222,7 @@ PROG(PARSE_IPV6)(struct __sk_buff *skb)
         if (!g)
                 return 0;
 
-        nhoff = g->nhoff;
+        nhoff = skb->cb[0];
 
         ip_proto = load_byte(skb,
                              nhoff + offsetof(struct ipv6hdr, nexthdr));
@@ -233,25 +232,21 @@ PROG(PARSE_IPV6)(struct __sk_buff *skb)
                        nhoff + offsetof(struct ipv6hdr, daddr));
         nhoff += sizeof(struct ipv6hdr);
 
-        g->nhoff = nhoff;
+        skb->cb[0] = nhoff;
         parse_ip_proto(skb, g, ip_proto);
         return 0;
 }
 
 PROG(PARSE_VLAN)(struct __sk_buff *skb)
 {
-        struct globals *g = this_cpu_globals();
         __u32 nhoff, proto;
 
-        if (!g)
-                return 0;
-
-        nhoff = g->nhoff;
+        nhoff = skb->cb[0];
 
         proto = load_half(skb, nhoff + offsetof(struct vlan_hdr,
                                                 h_vlan_encapsulated_proto));
         nhoff += sizeof(struct vlan_hdr);
-        g->nhoff = nhoff;
+        skb->cb[0] = nhoff;
 
         parse_eth_proto(skb, proto);
 
@@ -260,17 +255,13 @@ PROG(PARSE_VLAN)(struct __sk_buff *skb)
 
 PROG(PARSE_MPLS)(struct __sk_buff *skb)
 {
-        struct globals *g = this_cpu_globals();
         __u32 nhoff, label;
 
-        if (!g)
-                return 0;
-
-        nhoff = g->nhoff;
+        nhoff = skb->cb[0];
 
         label = load_word(skb, nhoff);
         nhoff += sizeof(struct mpls_label);
-        g->nhoff = nhoff;
+        skb->cb[0] = nhoff;
 
         if (label & MPLS_LS_S_MASK) {
                 __u8 verlen = load_byte(skb, nhoff);
@@ -288,14 +279,10 @@ PROG(PARSE_MPLS)(struct __sk_buff *skb)
 SEC("socket/0")
 int main_prog(struct __sk_buff *skb)
 {
-        struct globals *g = this_cpu_globals();
         __u32 nhoff = ETH_HLEN;
         __u32 proto = load_half(skb, 12);
 
-        if (!g)
-                return 0;
-
-        g->nhoff = nhoff;
+        skb->cb[0] = nhoff;
         parse_eth_proto(skb, proto);
         return 0;
 }
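
The other half of the policy (mark and tc_index writable from tc_cls_act programs only) is exercised at the instruction level by the verifier tests added below. As a rough C-level illustration only, not part of this patch, such a classifier could look like the following sketch; the section name and values are made up, and tc attachment is not shown.

```c
/* Hypothetical sketch of a tc_cls_act (BPF_PROG_TYPE_SCHED_CLS) program
 * touching the newly writable fields. Constants are arbitrary.
 */
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

SEC("classifier")
int set_mark(struct __sk_buff *skb)
{
        skb->mark = 0x1234;              /* writable from tc_cls_act */
        skb->tc_index = skb->mark & 0xf; /* also writable from tc_cls_act */
        skb->cb[0] = skb->cb[0] + 1;     /* cb[] stays read/write here too */
        return 0;
}

char _license[] SEC("license") = "GPL";
```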
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index 12f3780af73f..693605997abc 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -29,6 +29,7 @@ struct bpf_test {
                 ACCEPT,
                 REJECT
         } result;
+        enum bpf_prog_type prog_type;
 };
 
 static struct bpf_test tests[] = {
@@ -743,6 +744,84 @@ static struct bpf_test tests[] = {
                 .errstr = "different pointers",
                 .result = REJECT,
         },
+        {
+                "check skb->mark is not writeable by sockets",
+                .insns = {
+                        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                    offsetof(struct __sk_buff, mark)),
+                        BPF_EXIT_INSN(),
+                },
+                .errstr = "invalid bpf_context access",
+                .result = REJECT,
+        },
+        {
+                "check skb->tc_index is not writeable by sockets",
+                .insns = {
+                        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                    offsetof(struct __sk_buff, tc_index)),
+                        BPF_EXIT_INSN(),
+                },
+                .errstr = "invalid bpf_context access",
+                .result = REJECT,
+        },
+        {
+                "check non-u32 access to cb",
+                .insns = {
+                        BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
+                                    offsetof(struct __sk_buff, cb[0])),
+                        BPF_EXIT_INSN(),
+                },
+                .errstr = "invalid bpf_context access",
+                .result = REJECT,
+        },
+        {
+                "check out of range skb->cb access",
+                .insns = {
+                        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                    offsetof(struct __sk_buff, cb[60])),
+                        BPF_EXIT_INSN(),
+                },
+                .errstr = "invalid bpf_context access",
+                .result = REJECT,
+                .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+        },
+        {
+                "write skb fields from socket prog",
+                .insns = {
+                        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                    offsetof(struct __sk_buff, cb[4])),
+                        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+                        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                    offsetof(struct __sk_buff, mark)),
+                        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                    offsetof(struct __sk_buff, tc_index)),
+                        BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+                        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                    offsetof(struct __sk_buff, cb[0])),
+                        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                    offsetof(struct __sk_buff, cb[2])),
+                        BPF_EXIT_INSN(),
+                },
+                .result = ACCEPT,
+        },
+        {
+                "write skb fields from tc_cls_act prog",
+                .insns = {
+                        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                    offsetof(struct __sk_buff, cb[0])),
+                        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                    offsetof(struct __sk_buff, mark)),
+                        BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                    offsetof(struct __sk_buff, tc_index)),
+                        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                    offsetof(struct __sk_buff, tc_index)),
+                        BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                    offsetof(struct __sk_buff, cb[3])),
+                        BPF_EXIT_INSN(),
+                },
+                .result = ACCEPT,
+                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+        },
 };
 
 static int probe_filter_length(struct bpf_insn *fp)
@@ -775,6 +854,7 @@ static int test(void)
 
         for (i = 0; i < ARRAY_SIZE(tests); i++) {
                 struct bpf_insn *prog = tests[i].insns;
+                int prog_type = tests[i].prog_type;
                 int prog_len = probe_filter_length(prog);
                 int *fixup = tests[i].fixup;
                 int map_fd = -1;
@@ -789,8 +869,8 @@ static int test(void)
                 }
                 printf("#%d %s ", i, tests[i].descr);
 
-                prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog,
-                                        prog_len * sizeof(struct bpf_insn),
+                prog_fd = bpf_prog_load(prog_type ?: BPF_PROG_TYPE_SOCKET_FILTER,
+                                        prog, prog_len * sizeof(struct bpf_insn),
                                         "GPL", 0);
 
                 if (tests[i].result == ACCEPT) {