Diffstat (limited to 'tools/testing/selftests/bpf/test_progs.c')
-rw-r--r--   tools/testing/selftests/bpf/test_progs.c | 355
 1 file changed, 352 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 6761be18a91f..b549308abd19 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -21,8 +21,10 @@ typedef __u16 __sum16;
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
 #include <linux/filter.h>
+#include <linux/perf_event.h>
 #include <linux/unistd.h>
 
+#include <sys/ioctl.h>
 #include <sys/wait.h>
 #include <sys/resource.h>
 #include <sys/types.h>
@@ -167,10 +169,9 @@ out:
 #define NUM_ITER 100000
 #define VIP_NUM 5
 
-static void test_l4lb(void)
+static void test_l4lb(const char *file)
 {
 	unsigned int nr_cpus = bpf_num_possible_cpus();
-	const char *file = "./test_l4lb.o";
 	struct vip key = {.protocol = 6};
 	struct vip_meta {
 		__u32 flags;
@@ -247,6 +248,95 @@ out:
 	bpf_object__close(obj);
 }
 
+static void test_l4lb_all(void)
+{
+	const char *file1 = "./test_l4lb.o";
+	const char *file2 = "./test_l4lb_noinline.o";
+
+	test_l4lb(file1);
+	test_l4lb(file2);
+}
+
+static void test_xdp_noinline(void)
+{
+	const char *file = "./test_xdp_noinline.o";
+	unsigned int nr_cpus = bpf_num_possible_cpus();
+	struct vip key = {.protocol = 6};
+	struct vip_meta {
+		__u32 flags;
+		__u32 vip_num;
+	} value = {.vip_num = VIP_NUM};
+	__u32 stats_key = VIP_NUM;
+	struct vip_stats {
+		__u64 bytes;
+		__u64 pkts;
+	} stats[nr_cpus];
+	struct real_definition {
+		union {
+			__be32 dst;
+			__be32 dstv6[4];
+		};
+		__u8 flags;
+	} real_def = {.dst = MAGIC_VAL};
+	__u32 ch_key = 11, real_num = 3;
+	__u32 duration, retval, size;
+	int err, i, prog_fd, map_fd;
+	__u64 bytes = 0, pkts = 0;
+	struct bpf_object *obj;
+	char buf[128];
+	u32 *magic = (u32 *)buf;
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
+	if (err) {
+		error_cnt++;
+		return;
+	}
+
+	map_fd = bpf_find_map(__func__, obj, "vip_map");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_update_elem(map_fd, &key, &value, 0);
+
+	map_fd = bpf_find_map(__func__, obj, "ch_rings");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
+
+	map_fd = bpf_find_map(__func__, obj, "reals");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
+
+	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+				buf, &size, &retval, &duration);
+	CHECK(err || errno || retval != 1 || size != 54 ||
+	      *magic != MAGIC_VAL, "ipv4",
+	      "err %d errno %d retval %d size %d magic %x\n",
+	      err, errno, retval, size, *magic);
+
+	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+				buf, &size, &retval, &duration);
+	CHECK(err || errno || retval != 1 || size != 74 ||
+	      *magic != MAGIC_VAL, "ipv6",
+	      "err %d errno %d retval %d size %d magic %x\n",
+	      err, errno, retval, size, *magic);
+
+	map_fd = bpf_find_map(__func__, obj, "stats");
+	if (map_fd < 0)
+		goto out;
+	bpf_map_lookup_elem(map_fd, &stats_key, stats);
+	for (i = 0; i < nr_cpus; i++) {
+		bytes += stats[i].bytes;
+		pkts += stats[i].pkts;
+	}
+	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
+		error_cnt++;
+		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
+	}
+out:
+	bpf_object__close(obj);
+}
+
 static void test_tcp_estats(void)
 {
 	const char *file = "./test_tcp_estats.o";
@@ -617,6 +707,262 @@ static void test_obj_name(void)
 	}
 }
 
+static void test_tp_attach_query(void)
+{
+	const int num_progs = 3;
+	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
+	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
+	const char *file = "./test_tracepoint.o";
+	struct perf_event_query_bpf *query;
+	struct perf_event_attr attr = {};
+	struct bpf_object *obj[num_progs];
+	struct bpf_prog_info prog_info;
+	char buf[256];
+
+	snprintf(buf, sizeof(buf),
+		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+	efd = open(buf, O_RDONLY, 0);
+	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+		return;
+	bytes = read(efd, buf, sizeof(buf));
+	close(efd);
+	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+		  "read", "bytes %d errno %d\n", bytes, errno))
+		return;
+
+	attr.config = strtol(buf, NULL, 0);
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+	attr.sample_period = 1;
+	attr.wakeup_events = 1;
+
+	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
+	for (i = 0; i < num_progs; i++) {
+		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
+				    &prog_fd[i]);
+		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+			goto cleanup1;
+
+		bzero(&prog_info, sizeof(prog_info));
+		prog_info.jited_prog_len = 0;
+		prog_info.xlated_prog_len = 0;
+		prog_info.nr_map_ids = 0;
+		info_len = sizeof(prog_info);
+		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
+		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
+			  err, errno))
+			goto cleanup1;
+		saved_prog_ids[i] = prog_info.id;
+
+		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+				    0 /* cpu 0 */, -1 /* group id */,
+				    0 /* flags */);
+		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
+			  pmu_fd[i], errno))
+			goto cleanup2;
+		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+			  err, errno))
+			goto cleanup3;
+
+		if (i == 0) {
+			/* check NULL prog array query */
+			query->ids_len = num_progs;
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+			if (CHECK(err || query->prog_cnt != 0,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d query->prog_cnt %u\n",
+				  err, errno, query->prog_cnt))
+				goto cleanup3;
+		}
+
+		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
+		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+			  err, errno))
+			goto cleanup3;
+
+		if (i == 1) {
+			/* try to get # of programs only */
+			query->ids_len = 0;
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+			if (CHECK(err || query->prog_cnt != 2,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d query->prog_cnt %u\n",
+				  err, errno, query->prog_cnt))
+				goto cleanup3;
+
+			/* try a few negative tests */
+			/* invalid query pointer */
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
+				    (struct perf_event_query_bpf *)0x1);
+			if (CHECK(!err || errno != EFAULT,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d\n", err, errno))
+				goto cleanup3;
+
+			/* not enough space */
+			query->ids_len = 1;
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d query->prog_cnt %u\n",
+				  err, errno, query->prog_cnt))
+				goto cleanup3;
+		}
+
+		query->ids_len = num_progs;
+		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+		if (CHECK(err || query->prog_cnt != (i + 1),
+			  "perf_event_ioc_query_bpf",
+			  "err %d errno %d query->prog_cnt %u\n",
+			  err, errno, query->prog_cnt))
+			goto cleanup3;
+		for (j = 0; j < i + 1; j++)
+			if (CHECK(saved_prog_ids[j] != query->ids[j],
+				  "perf_event_ioc_query_bpf",
+				  "#%d saved_prog_id %x query prog_id %x\n",
+				  j, saved_prog_ids[j], query->ids[j]))
+				goto cleanup3;
+	}
+
+	i = num_progs - 1;
+	for (; i >= 0; i--) {
+cleanup3:
+		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
+cleanup2:
+		close(pmu_fd[i]);
+cleanup1:
+		bpf_object__close(obj[i]);
+	}
+	free(query);
+}
+
+static int compare_map_keys(int map1_fd, int map2_fd)
+{
+	__u32 key, next_key;
+	char val_buf[PERF_MAX_STACK_DEPTH * sizeof(__u64)];
+	int err;
+
+	err = bpf_map_get_next_key(map1_fd, NULL, &key);
+	if (err)
+		return err;
+	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
+	if (err)
+		return err;
+
+	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
+		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
+		if (err)
+			return err;
+
+		key = next_key;
+	}
+	if (errno != ENOENT)
+		return -1;
+
+	return 0;
+}
+
+static void test_stacktrace_map()
+{
+	int control_map_fd, stackid_hmap_fd, stackmap_fd;
+	const char *file = "./test_stacktrace_map.o";
+	int bytes, efd, err, pmu_fd, prog_fd;
+	struct perf_event_attr attr = {};
+	__u32 key, val, duration = 0;
+	struct bpf_object *obj;
+	char buf[256];
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+		goto out;
+
+	/* Get the ID for the sched/sched_switch tracepoint */
+	snprintf(buf, sizeof(buf),
+		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+	efd = open(buf, O_RDONLY, 0);
+	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+		goto close_prog;
+
+	bytes = read(efd, buf, sizeof(buf));
+	close(efd);
+	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+		  "read", "bytes %d errno %d\n", bytes, errno))
+		goto close_prog;
+
+	/* Open the perf event and attach the bpf program */
+	attr.config = strtol(buf, NULL, 0);
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+	attr.sample_period = 1;
+	attr.wakeup_events = 1;
+	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+			 0 /* cpu 0 */, -1 /* group id */,
+			 0 /* flags */);
+	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
+		  pmu_fd, errno))
+		goto close_prog;
+
+	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+		  err, errno))
+		goto close_pmu;
+
+	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+		  err, errno))
+		goto disable_pmu;
+
+	/* find map fds */
+	control_map_fd = bpf_find_map(__func__, obj, "control_map");
+	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
+		  "err %d errno %d\n", err, errno))
+		goto disable_pmu;
+
+	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
+	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
+		  "err %d errno %d\n", err, errno))
+		goto disable_pmu;
+
+	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
+	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
+		  err, errno))
+		goto disable_pmu;
+
+	/* give some time for the bpf program to run */
+	sleep(1);
+
+	/* disable stack trace collection */
+	key = 0;
+	val = 1;
+	bpf_map_update_elem(control_map_fd, &key, &val, 0);
+
+	/* for every element in stackid_hmap, we can find a corresponding one
+	 * in stackmap, and vice versa.
+	 */
+	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
+	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
+		  "err %d errno %d\n", err, errno))
+		goto disable_pmu;
+
+	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
+	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
+		  "err %d errno %d\n", err, errno))
+		; /* fall through */
+
+disable_pmu:
+	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+
+close_pmu:
+	close(pmu_fd);
+
+close_prog:
+	bpf_object__close(obj);
+
+out:
+	return;
+}
+
 int main(void)
 {
 	struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
@@ -625,11 +971,14 @@ int main(void)
 
 	test_pkt_access();
 	test_xdp();
-	test_l4lb();
+	test_l4lb_all();
+	test_xdp_noinline();
 	test_tcp_estats();
 	test_bpf_obj_id();
 	test_pkt_md_access();
 	test_obj_name();
+	test_tp_attach_query();
+	test_stacktrace_map();
 
 	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
 	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;