author	Stanislav Fomichev <sdf@google.com>	2019-04-22 11:55:50 -0400
committer	Daniel Borkmann <daniel@iogearbox.net>	2019-04-23 12:36:34 -0400
commit	0905beec9f52caf2c7065a8a88c08bc370850710 (patch)
tree	8ef542273e104b77a67fc918cfa57d618afccb8d
parent	c9cb2c1e11cee75b3af5699add3302a3997f78e4 (diff)
selftests/bpf: run flow dissector tests in skb-less mode
Export last_dissection map from the flow dissector and use a known
place in the tun driver to trigger BPF flow dissection.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
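In brief, the mechanism exercised here: a tap device opened with IFF_NAPI | IFF_NAPI_FRAGS makes tun call eth_get_headlen() on writes, which runs the attached flow dissector program without constructing an skb; the program records its resulting bpf_flow_keys in the last_dissection array map, where userspace reads them back. A minimal userspace sketch of that flow, condensed from the test added below (error handling and the ETH_HLEN offset fixup are omitted; see the patch for the full version):

#include <unistd.h>		/* write */
#include <linux/bpf.h>		/* struct bpf_flow_keys, BPF_FLOW_DISSECTOR */
#include <bpf/bpf.h>		/* bpf_prog_attach, bpf_map_lookup_elem */

/* Sketch only: prog_fd and keys_fd come from bpf_flow_load() as extended
 * by this patch; tap_fd is a tap opened with IFF_NAPI | IFF_NAPI_FRAGS. */
static void dissect_skb_less(int prog_fd, int keys_fd, int tap_fd,
			     void *pkt, size_t len)
{
	struct bpf_flow_keys flow_keys = {};
	__u32 key = 0;

	/* Attach globally; tun's write path then calls eth_get_headlen(),
	 * which invokes the dissector program without an skb. */
	bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	write(tap_fd, pkt, len);

	/* The program exported the resulting keys into last_dissection. */
	bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
}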
-rw-r--r--	tools/testing/selftests/bpf/flow_dissector_load.c	2
-rw-r--r--	tools/testing/selftests/bpf/flow_dissector_load.h	16
-rw-r--r--	tools/testing/selftests/bpf/prog_tests/flow_dissector.c	102
-rw-r--r--	tools/testing/selftests/bpf/progs/bpf_flow.c	79
4 files changed, 165 insertions(+), 34 deletions(-)
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index 7136ab9ffa73..3fd83b9dc1bf 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -26,7 +26,7 @@ static void load_and_attach_program(void)
 	struct bpf_object *obj;
 
 	ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name,
-			    cfg_map_name, &prog_fd);
+			    cfg_map_name, NULL, &prog_fd, NULL);
 	if (ret)
 		error(1, 0, "bpf_flow_load %s", cfg_path_name);
 
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
index 41dd6959feb0..eeb48b6fc827 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.h
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -9,10 +9,12 @@ static inline int bpf_flow_load(struct bpf_object **obj,
 				const char *path,
 				const char *section_name,
 				const char *map_name,
-				int *prog_fd)
+				const char *keys_map_name,
+				int *prog_fd,
+				int *keys_fd)
 {
 	struct bpf_program *prog, *main_prog;
-	struct bpf_map *prog_array;
+	struct bpf_map *prog_array, *keys;
 	int prog_array_fd;
 	int ret, fd, i;
 
@@ -37,6 +39,16 @@ static inline int bpf_flow_load(struct bpf_object **obj,
 	if (prog_array_fd < 0)
 		return ret;
 
+	if (keys_map_name && keys_fd) {
+		keys = bpf_object__find_map_by_name(*obj, keys_map_name);
+		if (!keys)
+			return -1;
+
+		*keys_fd = bpf_map__fd(keys);
+		if (*keys_fd < 0)
+			return -1;
+	}
+
 	i = 0;
 	bpf_object__for_each_program(prog, *obj) {
 		fd = bpf_program__fd(prog);
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index 126319f9a97c..51758a0ca55e 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -1,5 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
+#include <error.h>
+#include <linux/if.h>
+#include <linux/if_tun.h>
 
 #define CHECK_FLOW_KEYS(desc, got, expected) \
 	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \
@@ -140,13 +143,73 @@ struct test tests[] = {
 	},
 };
 
+static int create_tap(const char *ifname)
+{
+	struct ifreq ifr = {
+		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
+	};
+	int fd, ret;
+
+	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+
+	fd = open("/dev/net/tun", O_RDWR);
+	if (fd < 0)
+		return -1;
+
+	ret = ioctl(fd, TUNSETIFF, &ifr);
+	if (ret)
+		return -1;
+
+	return fd;
+}
+
+static int tx_tap(int fd, void *pkt, size_t len)
+{
+	struct iovec iov[] = {
+		{
+			.iov_len = len,
+			.iov_base = pkt,
+		},
+	};
+	return writev(fd, iov, ARRAY_SIZE(iov));
+}
+
+static int ifup(const char *ifname)
+{
+	struct ifreq ifr = {};
+	int sk, ret;
+
+	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
+
+	sk = socket(PF_INET, SOCK_DGRAM, 0);
+	if (sk < 0)
+		return -1;
+
+	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
+	if (ret) {
+		close(sk);
+		return -1;
+	}
+
+	ifr.ifr_flags |= IFF_UP;
+	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
+	if (ret) {
+		close(sk);
+		return -1;
+	}
+
+	close(sk);
+	return 0;
+}
+
 void test_flow_dissector(void)
 {
+	int i, err, prog_fd, keys_fd = -1, tap_fd;
 	struct bpf_object *obj;
-	int i, err, prog_fd;
+	__u32 duration = 0;
 
 	err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
-			    "jmp_table", &prog_fd);
+			    "jmp_table", "last_dissection", &prog_fd, &keys_fd);
 	if (err) {
 		error_cnt++;
 		return;
@@ -171,5 +234,40 @@ void test_flow_dissector(void)
 		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
 	}
 
+	/* Do the same tests but for the skb-less flow dissector.
+	 * We use a known path in the net/tun driver that calls
+	 * eth_get_headlen and manually export bpf_flow_keys via a
+	 * BPF map in this case.
+	 *
+	 * Note that since eth_get_headlen operates at the L2 level,
+	 * we adjust the exported nhoff/thoff by ETH_HLEN.
+	 */
+
+	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
+	CHECK(err, "bpf_prog_attach", "err %d errno %d", err, errno);
+
+	tap_fd = create_tap("tap0");
+	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d", tap_fd, errno);
+	err = ifup("tap0");
+	CHECK(err, "ifup", "err %d errno %d", err, errno);
+
+	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+		struct bpf_flow_keys flow_keys = {};
+		struct bpf_prog_test_run_attr tattr = {};
+		__u32 key = 0;
+
+		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
+		CHECK(err < 0, "tx_tap", "err %d errno %d", err, errno);
+
+		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
+		CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);
+
+		flow_keys.nhoff -= ETH_HLEN;
+		flow_keys.thoff -= ETH_HLEN;
+
+		CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err);
+		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
+	}
+
 	bpf_object__close(obj);
 }
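Worth spelling out the offset math in the loop above: eth_get_headlen() hands the dissector a buffer that starts at the Ethernet header, so the exported nhoff/thoff are L2-relative, while the expectations in tests[] are L3-relative. A hypothetical helper (not part of the patch) capturing the adjustment:

#include <linux/bpf.h>		/* struct bpf_flow_keys */
#include <linux/if_ether.h>	/* ETH_HLEN == 14 */

/* Hypothetical helper, not in the patch: shift the L2-relative offsets
 * reported by the skb-less path back to L3-relative values. For a plain
 * IPv4/TCP frame the dissector reports nhoff = 14 and thoff = 34; after
 * the shift they read 0 and 20, matching the skb-mode expectations. */
static void l2_to_l3_offsets(struct bpf_flow_keys *keys)
{
	keys->nhoff -= ETH_HLEN;
	keys->thoff -= ETH_HLEN;
}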
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 75b17cada539..81ad9a0b29d0 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -64,6 +64,25 @@ struct bpf_map_def SEC("maps") jmp_table = {
 	.max_entries = 8
 };
 
+struct bpf_map_def SEC("maps") last_dissection = {
+	.type = BPF_MAP_TYPE_ARRAY,
+	.key_size = sizeof(__u32),
+	.value_size = sizeof(struct bpf_flow_keys),
+	.max_entries = 1,
+};
+
+static __always_inline int export_flow_keys(struct bpf_flow_keys *keys,
+					    int ret)
+{
+	struct bpf_flow_keys *val;
+	__u32 key = 0;
+
+	val = bpf_map_lookup_elem(&last_dissection, &key);
+	if (val)
+		memcpy(val, keys, sizeof(*val));
+	return ret;
+}
+
 static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
 							 __u16 hdr_size,
 							 void *buffer)
@@ -109,10 +128,10 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
 		break;
 	default:
 		/* Protocol not supported */
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 	}
 
-	return BPF_DROP;
+	return export_flow_keys(keys, BPF_DROP);
 }
 
 SEC("flow_dissector")
@@ -139,8 +158,8 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 	case IPPROTO_ICMP:
 		icmp = bpf_flow_dissect_get_header(skb, sizeof(*icmp), &_icmp);
 		if (!icmp)
-			return BPF_DROP;
-		return BPF_OK;
+			return export_flow_keys(keys, BPF_DROP);
+		return export_flow_keys(keys, BPF_OK);
 	case IPPROTO_IPIP:
 		keys->is_encap = true;
 		return parse_eth_proto(skb, bpf_htons(ETH_P_IP));
@@ -150,11 +169,11 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 	case IPPROTO_GRE:
 		gre = bpf_flow_dissect_get_header(skb, sizeof(*gre), &_gre);
 		if (!gre)
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		if (bpf_htons(gre->flags & GRE_VERSION))
 			/* Only inspect standard GRE packets with version 0 */
-			return BPF_OK;
+			return export_flow_keys(keys, BPF_OK);
 
 		keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
 		if (GRE_IS_CSUM(gre->flags))
@@ -170,7 +189,7 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 			eth = bpf_flow_dissect_get_header(skb, sizeof(*eth),
 							  &_eth);
 			if (!eth)
-				return BPF_DROP;
+				return export_flow_keys(keys, BPF_DROP);
 
 			keys->thoff += sizeof(*eth);
 
@@ -181,31 +200,31 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
 	case IPPROTO_TCP:
 		tcp = bpf_flow_dissect_get_header(skb, sizeof(*tcp), &_tcp);
 		if (!tcp)
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		if (tcp->doff < 5)
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		keys->sport = tcp->source;
 		keys->dport = tcp->dest;
-		return BPF_OK;
+		return export_flow_keys(keys, BPF_OK);
 	case IPPROTO_UDP:
 	case IPPROTO_UDPLITE:
 		udp = bpf_flow_dissect_get_header(skb, sizeof(*udp), &_udp);
 		if (!udp)
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		keys->sport = udp->source;
 		keys->dport = udp->dest;
-		return BPF_OK;
+		return export_flow_keys(keys, BPF_OK);
 	default:
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 	}
 
-	return BPF_DROP;
+	return export_flow_keys(keys, BPF_DROP);
 }
 
 static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
@@ -225,7 +244,7 @@ static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
 		return parse_ip_proto(skb, nexthdr);
 	}
 
-	return BPF_DROP;
+	return export_flow_keys(keys, BPF_DROP);
 }
 
 PROG(IP)(struct __sk_buff *skb)
@@ -238,11 +257,11 @@ PROG(IP)(struct __sk_buff *skb)
 
 	iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph);
 	if (!iph)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	/* IP header cannot be smaller than 20 bytes */
 	if (iph->ihl < 5)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	keys->addr_proto = ETH_P_IP;
 	keys->ipv4_src = iph->saddr;
@@ -250,7 +269,7 @@ PROG(IP)(struct __sk_buff *skb)
 
 	keys->thoff += iph->ihl << 2;
 	if (data + keys->thoff > data_end)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
 		keys->is_frag = true;
@@ -264,7 +283,7 @@ PROG(IP)(struct __sk_buff *skb)
 	}
 
 	if (done)
-		return BPF_OK;
+		return export_flow_keys(keys, BPF_OK);
 
 	return parse_ip_proto(skb, iph->protocol);
 }
@@ -276,7 +295,7 @@ PROG(IPV6)(struct __sk_buff *skb)
 
 	ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
 	if (!ip6h)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	keys->addr_proto = ETH_P_IPV6;
 	memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
@@ -288,11 +307,12 @@ PROG(IPV6)(struct __sk_buff *skb)
 
 PROG(IPV6OP)(struct __sk_buff *skb)
 {
+	struct bpf_flow_keys *keys = skb->flow_keys;
 	struct ipv6_opt_hdr *ip6h, _ip6h;
 
 	ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h);
 	if (!ip6h)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	/* hlen is in 8-octets and does not include the first 8 bytes
 	 * of the header
@@ -309,7 +329,7 @@ PROG(IPV6FR)(struct __sk_buff *skb)
 
 	fragh = bpf_flow_dissect_get_header(skb, sizeof(*fragh), &_fragh);
 	if (!fragh)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	keys->thoff += sizeof(*fragh);
 	keys->is_frag = true;
@@ -321,13 +341,14 @@ PROG(IPV6FR)(struct __sk_buff *skb)
 
 PROG(MPLS)(struct __sk_buff *skb)
 {
+	struct bpf_flow_keys *keys = skb->flow_keys;
 	struct mpls_label *mpls, _mpls;
 
 	mpls = bpf_flow_dissect_get_header(skb, sizeof(*mpls), &_mpls);
 	if (!mpls)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
-	return BPF_OK;
+	return export_flow_keys(keys, BPF_OK);
 }
 
 PROG(VLAN)(struct __sk_buff *skb)
@@ -339,10 +360,10 @@ PROG(VLAN)(struct __sk_buff *skb)
 	if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
 		vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
 		if (!vlan)
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
-			return BPF_DROP;
+			return export_flow_keys(keys, BPF_DROP);
 
 		keys->nhoff += sizeof(*vlan);
 		keys->thoff += sizeof(*vlan);
@@ -350,14 +371,14 @@ PROG(VLAN)(struct __sk_buff *skb)
 
 	vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
 	if (!vlan)
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	keys->nhoff += sizeof(*vlan);
 	keys->thoff += sizeof(*vlan);
 	/* Only allow 8021AD + 8021Q double tagging and no triple tagging. */
 	if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
 	    vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
-		return BPF_DROP;
+		return export_flow_keys(keys, BPF_DROP);
 
 	keys->n_proto = vlan->h_vlan_encapsulated_proto;
 	return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);