author    Stanislav Fomichev <sdf@google.com>    2019-03-01 22:42:17 -0500
committer Alexei Starovoitov <ast@kernel.org>   2019-03-02 14:10:40 -0500
commit    20cb14ff9c49fcb189daf82246fb6fcd3923542c (patch)
tree      ce0ecc585e176a6f4bae1ea851d69f694b248584
parent    615741d81de6c16aa466c4eb37805caa868a9bb8 (diff)
selftests: bpf: break up test_progs - tracepoint
Move the tracepoint prog tests out of test_progs.c into separate files
under prog_tests/.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
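Note: each new file under prog_tests/ exposes a non-static entry point
named after the file, i.e. prog_tests/<name>.c provides
void test_<name>(void). The glue that lets test_progs.c pick these up is
not part of this patch; one plausible shape for it, sketched here as a
hypothetical (the header name, the DECLARE/CALL macros, and the generation
step are assumptions, not shown in this commit), is a generated header
included twice:

/* Hypothetical generated prog_tests/tests.h -- one entry per
 * prog_tests/<name>.c; NOT part of this patch.
 */
#ifdef DECLARE
extern void test_get_stack_raw_tp(void);
extern void test_task_fd_query_rawtp(void);
extern void test_task_fd_query_tp(void);
extern void test_tp_attach_query(void);
#endif
#ifdef CALL
test_get_stack_raw_tp();
test_task_fd_query_rawtp();
test_task_fd_query_tp();
test_tp_attach_query();
#endif

/* test_progs.c would then consume the header twice (sketch): */
#define DECLARE
#include <prog_tests/tests.h>
#undef DECLARE

int main(void)
{
#define CALL
#include <prog_tests/tests.h>
#undef CALL
	return 0;
}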
 tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c    | 139 +++++++++
 tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c |  78 +++++
 tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c    |  82 +++++
 tools/testing/selftests/bpf/prog_tests/tp_attach_query.c     | 132 ++++++++
 tools/testing/selftests/bpf/test_progs.c                     | 427 ----------
 5 files changed, 431 insertions(+), 427 deletions(-)
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
new file mode 100644
index 000000000000..d7bb5beb1c57
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#define MAX_CNT_RAWTP	10ull
+#define MAX_STACK_RAWTP	100
+struct get_stack_trace_t {
+	int pid;
+	int kern_stack_size;
+	int user_stack_size;
+	int user_stack_buildid_size;
+	__u64 kern_stack[MAX_STACK_RAWTP];
+	__u64 user_stack[MAX_STACK_RAWTP];
+	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
+};
+
+static int get_stack_print_output(void *data, int size)
+{
+	bool good_kern_stack = false, good_user_stack = false;
+	const char *nonjit_func = "___bpf_prog_run";
+	struct get_stack_trace_t *e = data;
+	int i, num_stack;
+	static __u64 cnt;
+	struct ksym *ks;
+
+	cnt++;
+
+	if (size < sizeof(struct get_stack_trace_t)) {
+		__u64 *raw_data = data;
+		bool found = false;
+
+		num_stack = size / sizeof(__u64);
+		/* If jit is enabled, we do not have a good way to
+		 * verify the sanity of the kernel stack. So we
+		 * just assume it is good if the stack is not empty.
+		 * This could be improved in the future.
+		 */
+		if (jit_enabled) {
+			found = num_stack > 0;
+		} else {
+			for (i = 0; i < num_stack; i++) {
+				ks = ksym_search(raw_data[i]);
+				if (strcmp(ks->name, nonjit_func) == 0) {
+					found = true;
+					break;
+				}
+			}
+		}
+		if (found) {
+			good_kern_stack = true;
+			good_user_stack = true;
+		}
+	} else {
+		num_stack = e->kern_stack_size / sizeof(__u64);
+		if (jit_enabled) {
+			good_kern_stack = num_stack > 0;
+		} else {
+			for (i = 0; i < num_stack; i++) {
+				ks = ksym_search(e->kern_stack[i]);
+				if (strcmp(ks->name, nonjit_func) == 0) {
+					good_kern_stack = true;
+					break;
+				}
+			}
+		}
+		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
+			good_user_stack = true;
+	}
+	if (!good_kern_stack || !good_user_stack)
+		return LIBBPF_PERF_EVENT_ERROR;
+
+	if (cnt == MAX_CNT_RAWTP)
+		return LIBBPF_PERF_EVENT_DONE;
+
+	return LIBBPF_PERF_EVENT_CONT;
+}
+
+void test_get_stack_raw_tp(void)
+{
+	const char *file = "./test_get_stack_rawtp.o";
+	int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
+	struct perf_event_attr attr = {};
+	struct timespec tv = {0, 10};
+	__u32 key = 0, duration = 0;
+	struct bpf_object *obj;
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+		return;
+
+	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
+	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+		goto close_prog;
+
+	perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
+	if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
+		  perfmap_fd, errno))
+		goto close_prog;
+
+	err = load_kallsyms();
+	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
+		goto close_prog;
+
+	attr.sample_type = PERF_SAMPLE_RAW;
+	attr.type = PERF_TYPE_SOFTWARE;
+	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
+	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
+			 -1/*group_fd*/, 0);
+	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
+		  errno))
+		goto close_prog;
+
+	err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
+	if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
+		  errno))
+		goto close_prog;
+
+	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+	if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
+		  err, errno))
+		goto close_prog;
+
+	err = perf_event_mmap(pmu_fd);
+	if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
+		goto close_prog;
+
+	/* trigger some syscall action */
+	for (i = 0; i < MAX_CNT_RAWTP; i++)
+		nanosleep(&tv, NULL);
+
+	err = perf_event_poller(pmu_fd, get_stack_print_output);
+	if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
+		goto close_prog;
+
+	goto close_prog_noerr;
+close_prog:
+	error_cnt++;
+close_prog_noerr:
+	bpf_object__close(obj);
+}
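A note on the poll loop above: perf_event_mmap() and perf_event_poller()
are selftest helpers (trace_helpers), and get_stack_print_output() steers
the loop through its return value. The contract as understood here is
sketched below; the exact enumerator values are an assumption and should
be checked against libbpf.h in this tree.

/* Callback return codes consumed by perf_event_poller(); values are
 * an assumption, verify against libbpf.h of this tree.
 */
enum bpf_perf_event_ret {
	LIBBPF_PERF_EVENT_DONE	= 0,	/* stop polling, success */
	LIBBPF_PERF_EVENT_ERROR	= -1,	/* stop polling, fail the test */
	LIBBPF_PERF_EVENT_CONT	= -2,	/* keep polling for more samples */
};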
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
new file mode 100644
index 000000000000..958a3d88de99
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_task_fd_query_rawtp(void)
+{
+	const char *file = "./test_get_stack_rawtp.o";
+	__u64 probe_offset, probe_addr;
+	__u32 len, prog_id, fd_type;
+	struct bpf_object *obj;
+	int efd, err, prog_fd;
+	__u32 duration = 0;
+	char buf[256];
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
+		return;
+
+	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
+	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
+		goto close_prog;
+
+	/* query (getpid(), efd) */
+	len = sizeof(buf);
+	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
+				&fd_type, &probe_offset, &probe_addr);
+	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
+		  errno))
+		goto close_prog;
+
+	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+	      strcmp(buf, "sys_enter") == 0;
+	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
+		  fd_type, buf))
+		goto close_prog;
+
+	/* test zero len */
+	len = 0;
+	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
+				&fd_type, &probe_offset, &probe_addr);
+	if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
+		  err, errno))
+		goto close_prog;
+	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+	      len == strlen("sys_enter");
+	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
+		goto close_prog;
+
+	/* test empty buffer */
+	len = sizeof(buf);
+	err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
+				&fd_type, &probe_offset, &probe_addr);
+	if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
+		  err, errno))
+		goto close_prog;
+	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+	      len == strlen("sys_enter");
+	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
+		goto close_prog;
+
+	/* test smaller buffer */
+	len = 3;
+	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
+				&fd_type, &probe_offset, &probe_addr);
+	if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
+		  "err %d errno %d\n", err, errno))
+		goto close_prog;
+	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
+	      len == strlen("sys_enter") &&
+	      strcmp(buf, "sy") == 0;
+	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
+		goto close_prog;
+
+	goto close_prog_noerr;
+close_prog:
+	error_cnt++;
+close_prog_noerr:
+	bpf_object__close(obj);
+}
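For reference, the wrapper exercised above is declared roughly as below
(assumed from tools/lib/bpf/bpf.h of this era). buf_len is in/out: the
caller passes the buffer capacity and the kernel writes back the full name
length, which is why the len == strlen("sys_enter") checks hold even for
the zero-length and truncated (ENOSPC) calls.

/* Assumed declaration; verify against tools/lib/bpf/bpf.h. */
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
		      __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
		      __u64 *probe_offset, __u64 *probe_addr);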
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
new file mode 100644
index 000000000000..d636a4f39476
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+static void test_task_fd_query_tp_core(const char *probe_name,
+				       const char *tp_name)
+{
+	const char *file = "./test_tracepoint.o";
+	int err, bytes, efd, prog_fd, pmu_fd;
+	struct perf_event_attr attr = {};
+	__u64 probe_offset, probe_addr;
+	__u32 len, prog_id, fd_type;
+	struct bpf_object *obj;
+	__u32 duration = 0;
+	char buf[256];
+
+	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
+	if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
+		goto close_prog;
+
+	snprintf(buf, sizeof(buf),
+		 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
+	efd = open(buf, O_RDONLY, 0);
+	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+		goto close_prog;
+	bytes = read(efd, buf, sizeof(buf));
+	close(efd);
+	if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
+		  "bytes %d errno %d\n", bytes, errno))
+		goto close_prog;
+
+	attr.config = strtol(buf, NULL, 0);
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.sample_type = PERF_SAMPLE_RAW;
+	attr.sample_period = 1;
+	attr.wakeup_events = 1;
+	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+			 0 /* cpu 0 */, -1 /* group id */,
+			 0 /* flags */);
+	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd, errno))
+		goto close_pmu;
+
+	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
+		  errno))
+		goto close_pmu;
+
+	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
+	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
+		  errno))
+		goto close_pmu;
+
+	/* query (getpid(), pmu_fd) */
+	len = sizeof(buf);
+	err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
+				&fd_type, &probe_offset, &probe_addr);
+	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
+		  errno))
+		goto close_pmu;
+
+	err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
+	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
+		  fd_type, buf))
+		goto close_pmu;
+
+	close(pmu_fd);
+	goto close_prog_noerr;
+
+close_pmu:
+	close(pmu_fd);
+close_prog:
+	error_cnt++;
+close_prog_noerr:
+	bpf_object__close(obj);
+}
+
+void test_task_fd_query_tp(void)
+{
+	test_task_fd_query_tp_core("sched/sched_switch",
+				   "sched_switch");
+	test_task_fd_query_tp_core("syscalls/sys_enter_read",
+				   "sys_enter_read");
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
new file mode 100644
index 000000000000..a2f476f91637
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+void test_tp_attach_query(void)
+{
+	const int num_progs = 3;
+	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
+	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
+	const char *file = "./test_tracepoint.o";
+	struct perf_event_query_bpf *query;
+	struct perf_event_attr attr = {};
+	struct bpf_object *obj[num_progs];
+	struct bpf_prog_info prog_info;
+	char buf[256];
+
+	snprintf(buf, sizeof(buf),
+		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
+	efd = open(buf, O_RDONLY, 0);
+	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
+		return;
+	bytes = read(efd, buf, sizeof(buf));
+	close(efd);
+	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
+		  "read", "bytes %d errno %d\n", bytes, errno))
+		return;
+
+	attr.config = strtol(buf, NULL, 0);
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
+	attr.sample_period = 1;
+	attr.wakeup_events = 1;
+
+	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
+	for (i = 0; i < num_progs; i++) {
+		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
+				    &prog_fd[i]);
+		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
+			goto cleanup1;
+
+		bzero(&prog_info, sizeof(prog_info));
+		prog_info.jited_prog_len = 0;
+		prog_info.xlated_prog_len = 0;
+		prog_info.nr_map_ids = 0;
+		info_len = sizeof(prog_info);
+		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
+		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
+			  err, errno))
+			goto cleanup1;
+		saved_prog_ids[i] = prog_info.id;
+
+		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
+				    0 /* cpu 0 */, -1 /* group id */,
+				    0 /* flags */);
+		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
+			  pmu_fd[i], errno))
+			goto cleanup2;
+		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
+		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
+			  err, errno))
+			goto cleanup3;
+
+		if (i == 0) {
+			/* check NULL prog array query */
+			query->ids_len = num_progs;
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+			if (CHECK(err || query->prog_cnt != 0,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d query->prog_cnt %u\n",
+				  err, errno, query->prog_cnt))
+				goto cleanup3;
+		}
+
+		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
+		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
+			  err, errno))
+			goto cleanup3;
+
+		if (i == 1) {
+			/* try to get # of programs only */
+			query->ids_len = 0;
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+			if (CHECK(err || query->prog_cnt != 2,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d query->prog_cnt %u\n",
+				  err, errno, query->prog_cnt))
+				goto cleanup3;
+
+			/* try a few negative tests */
+			/* invalid query pointer */
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
+				    (struct perf_event_query_bpf *)0x1);
+			if (CHECK(!err || errno != EFAULT,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d\n", err, errno))
+				goto cleanup3;
+
+			/* not enough space */
+			query->ids_len = 1;
+			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
+				  "perf_event_ioc_query_bpf",
+				  "err %d errno %d query->prog_cnt %u\n",
+				  err, errno, query->prog_cnt))
+				goto cleanup3;
+		}
+
+		query->ids_len = num_progs;
+		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
+		if (CHECK(err || query->prog_cnt != (i + 1),
+			  "perf_event_ioc_query_bpf",
+			  "err %d errno %d query->prog_cnt %u\n",
+			  err, errno, query->prog_cnt))
+			goto cleanup3;
+		for (j = 0; j < i + 1; j++)
+			if (CHECK(saved_prog_ids[j] != query->ids[j],
+				  "perf_event_ioc_query_bpf",
+				  "#%d saved_prog_id %x query prog_id %x\n",
+				  j, saved_prog_ids[j], query->ids[j]))
+				goto cleanup3;
+	}
+
+	i = num_progs - 1;
+	for (; i >= 0; i--) {
+cleanup3:
+		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
+cleanup2:
+		close(pmu_fd[i]);
+cleanup1:
+		bpf_object__close(obj[i]);
+	}
+	free(query);
+}
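The PERF_EVENT_IOC_QUERY_BPF ioctl above fills a caller-allocated,
variable-length structure, which is why the test allocates sizeof(*query)
plus room for num_progs ids and why ids_len (input capacity) and prog_cnt
(output count) are set and checked separately. Its uapi layout, as
understood here (verify against include/uapi/linux/perf_event.h), is:

/* Layout as understood here; see include/uapi/linux/perf_event.h. */
struct perf_event_query_bpf {
	__u32	ids_len;	/* in: capacity of ids[] */
	__u32	prog_cnt;	/* out: number of programs attached */
	__u32	ids[0];		/* out: first min(ids_len, prog_cnt) ids */
};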
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index a342fbe19f86..6e41dfab1e75 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -531,136 +531,6 @@ static void test_obj_name(void)
 	}
 }
 
-static void test_tp_attach_query(void)
-{
-	const int num_progs = 3;
-	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
-	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
-	const char *file = "./test_tracepoint.o";
-	struct perf_event_query_bpf *query;
-	struct perf_event_attr attr = {};
-	struct bpf_object *obj[num_progs];
-	struct bpf_prog_info prog_info;
-	char buf[256];
-
-	snprintf(buf, sizeof(buf),
-		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
-	efd = open(buf, O_RDONLY, 0);
-	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
-		return;
-	bytes = read(efd, buf, sizeof(buf));
-	close(efd);
-	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
-		  "read", "bytes %d errno %d\n", bytes, errno))
-		return;
-
-	attr.config = strtol(buf, NULL, 0);
-	attr.type = PERF_TYPE_TRACEPOINT;
-	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
-	attr.sample_period = 1;
-	attr.wakeup_events = 1;
-
-	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
-	for (i = 0; i < num_progs; i++) {
-		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
-				    &prog_fd[i]);
-		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
-			goto cleanup1;
-
-		bzero(&prog_info, sizeof(prog_info));
-		prog_info.jited_prog_len = 0;
-		prog_info.xlated_prog_len = 0;
-		prog_info.nr_map_ids = 0;
-		info_len = sizeof(prog_info);
-		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
-		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
-			  err, errno))
-			goto cleanup1;
-		saved_prog_ids[i] = prog_info.id;
-
-		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
-				    0 /* cpu 0 */, -1 /* group id */,
-				    0 /* flags */);
-		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
-			  pmu_fd[i], errno))
-			goto cleanup2;
-		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
-		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
-			  err, errno))
-			goto cleanup3;
-
-		if (i == 0) {
-			/* check NULL prog array query */
-			query->ids_len = num_progs;
-			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
-			if (CHECK(err || query->prog_cnt != 0,
-				  "perf_event_ioc_query_bpf",
-				  "err %d errno %d query->prog_cnt %u\n",
-				  err, errno, query->prog_cnt))
-				goto cleanup3;
-		}
-
-		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
-		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
-			  err, errno))
-			goto cleanup3;
-
-		if (i == 1) {
-			/* try to get # of programs only */
-			query->ids_len = 0;
-			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
-			if (CHECK(err || query->prog_cnt != 2,
-				  "perf_event_ioc_query_bpf",
-				  "err %d errno %d query->prog_cnt %u\n",
-				  err, errno, query->prog_cnt))
-				goto cleanup3;
-
-			/* try a few negative tests */
-			/* invalid query pointer */
-			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
-				    (struct perf_event_query_bpf *)0x1);
-			if (CHECK(!err || errno != EFAULT,
-				  "perf_event_ioc_query_bpf",
-				  "err %d errno %d\n", err, errno))
-				goto cleanup3;
-
-			/* no enough space */
-			query->ids_len = 1;
-			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
-			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
-				  "perf_event_ioc_query_bpf",
-				  "err %d errno %d query->prog_cnt %u\n",
-				  err, errno, query->prog_cnt))
-				goto cleanup3;
-		}
-
-		query->ids_len = num_progs;
-		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
-		if (CHECK(err || query->prog_cnt != (i + 1),
-			  "perf_event_ioc_query_bpf",
-			  "err %d errno %d query->prog_cnt %u\n",
-			  err, errno, query->prog_cnt))
-			goto cleanup3;
-		for (j = 0; j < i + 1; j++)
-			if (CHECK(saved_prog_ids[j] != query->ids[j],
-				  "perf_event_ioc_query_bpf",
-				  "#%d saved_prog_id %x query prog_id %x\n",
-				  j, saved_prog_ids[j], query->ids[j]))
-				goto cleanup3;
-	}
-
-	i = num_progs - 1;
-	for (; i >= 0; i--) {
-cleanup3:
-		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
-cleanup2:
-		close(pmu_fd[i]);
-cleanup1:
-		bpf_object__close(obj[i]);
-	}
-	free(query);
-}
-
 int compare_map_keys(int map1_fd, int map2_fd)
 {
 	__u32 key, next_key;
@@ -748,299 +618,6 @@ err:
 	return -1;
 }
 
-#define MAX_CNT_RAWTP	10ull
-#define MAX_STACK_RAWTP	100
-struct get_stack_trace_t {
-	int pid;
-	int kern_stack_size;
-	int user_stack_size;
-	int user_stack_buildid_size;
-	__u64 kern_stack[MAX_STACK_RAWTP];
-	__u64 user_stack[MAX_STACK_RAWTP];
-	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
-};
-
-static int get_stack_print_output(void *data, int size)
-{
-	bool good_kern_stack = false, good_user_stack = false;
-	const char *nonjit_func = "___bpf_prog_run";
-	struct get_stack_trace_t *e = data;
-	int i, num_stack;
-	static __u64 cnt;
-	struct ksym *ks;
-
-	cnt++;
-
-	if (size < sizeof(struct get_stack_trace_t)) {
-		__u64 *raw_data = data;
-		bool found = false;
-
-		num_stack = size / sizeof(__u64);
-		/* If jit is enabled, we do not have a good way to
-		 * verify the sanity of the kernel stack. So we
-		 * just assume it is good if the stack is not empty.
-		 * This could be improved in the future.
-		 */
-		if (jit_enabled) {
-			found = num_stack > 0;
-		} else {
-			for (i = 0; i < num_stack; i++) {
-				ks = ksym_search(raw_data[i]);
-				if (strcmp(ks->name, nonjit_func) == 0) {
-					found = true;
-					break;
-				}
-			}
-		}
-		if (found) {
-			good_kern_stack = true;
-			good_user_stack = true;
-		}
-	} else {
-		num_stack = e->kern_stack_size / sizeof(__u64);
-		if (jit_enabled) {
-			good_kern_stack = num_stack > 0;
-		} else {
-			for (i = 0; i < num_stack; i++) {
-				ks = ksym_search(e->kern_stack[i]);
-				if (strcmp(ks->name, nonjit_func) == 0) {
-					good_kern_stack = true;
-					break;
-				}
-			}
-		}
-		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
-			good_user_stack = true;
-	}
-	if (!good_kern_stack || !good_user_stack)
-		return LIBBPF_PERF_EVENT_ERROR;
-
-	if (cnt == MAX_CNT_RAWTP)
-		return LIBBPF_PERF_EVENT_DONE;
-
-	return LIBBPF_PERF_EVENT_CONT;
-}
-
-static void test_get_stack_raw_tp(void)
-{
-	const char *file = "./test_get_stack_rawtp.o";
-	int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
-	struct perf_event_attr attr = {};
-	struct timespec tv = {0, 10};
-	__u32 key = 0, duration = 0;
-	struct bpf_object *obj;
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
-	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
-		return;
-
-	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
-	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
-		goto close_prog;
-
-	perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
-	if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
-		  perfmap_fd, errno))
-		goto close_prog;
-
-	err = load_kallsyms();
-	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
-		goto close_prog;
-
-	attr.sample_type = PERF_SAMPLE_RAW;
-	attr.type = PERF_TYPE_SOFTWARE;
-	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
-	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
-			 -1/*group_fd*/, 0);
-	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
-		  errno))
-		goto close_prog;
-
-	err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
-	if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
-		  errno))
-		goto close_prog;
-
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
-	if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
-		  err, errno))
-		goto close_prog;
-
-	err = perf_event_mmap(pmu_fd);
-	if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
-		goto close_prog;
-
-	/* trigger some syscall action */
-	for (i = 0; i < MAX_CNT_RAWTP; i++)
-		nanosleep(&tv, NULL);
-
-	err = perf_event_poller(pmu_fd, get_stack_print_output);
-	if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
-		goto close_prog;
-
-	goto close_prog_noerr;
-close_prog:
-	error_cnt++;
-close_prog_noerr:
-	bpf_object__close(obj);
-}
-
-static void test_task_fd_query_rawtp(void)
-{
-	const char *file = "./test_get_stack_rawtp.o";
-	__u64 probe_offset, probe_addr;
-	__u32 len, prog_id, fd_type;
-	struct bpf_object *obj;
-	int efd, err, prog_fd;
-	__u32 duration = 0;
-	char buf[256];
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
-	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
-		return;
-
-	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
-	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
-		goto close_prog;
-
-	/* query (getpid(), efd) */
-	len = sizeof(buf);
-	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
-				&fd_type, &probe_offset, &probe_addr);
-	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
-		  errno))
-		goto close_prog;
-
-	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
-	      strcmp(buf, "sys_enter") == 0;
-	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
-		  fd_type, buf))
-		goto close_prog;
-
-	/* test zero len */
-	len = 0;
-	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
-				&fd_type, &probe_offset, &probe_addr);
-	if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
-		  err, errno))
-		goto close_prog;
-	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
-	      len == strlen("sys_enter");
-	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
-		goto close_prog;
-
-	/* test empty buffer */
-	len = sizeof(buf);
-	err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
-				&fd_type, &probe_offset, &probe_addr);
-	if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
-		  err, errno))
-		goto close_prog;
-	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
-	      len == strlen("sys_enter");
-	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
-		goto close_prog;
-
-	/* test smaller buffer */
-	len = 3;
-	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
-				&fd_type, &probe_offset, &probe_addr);
-	if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
-		  "err %d errno %d\n", err, errno))
-		goto close_prog;
-	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
-	      len == strlen("sys_enter") &&
-	      strcmp(buf, "sy") == 0;
-	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
-		goto close_prog;
-
-	goto close_prog_noerr;
-close_prog:
-	error_cnt++;
-close_prog_noerr:
-	bpf_object__close(obj);
-}
-
-static void test_task_fd_query_tp_core(const char *probe_name,
-				       const char *tp_name)
-{
-	const char *file = "./test_tracepoint.o";
-	int err, bytes, efd, prog_fd, pmu_fd;
-	struct perf_event_attr attr = {};
-	__u64 probe_offset, probe_addr;
-	__u32 len, prog_id, fd_type;
-	struct bpf_object *obj;
-	__u32 duration = 0;
-	char buf[256];
-
-	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
-	if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
-		goto close_prog;
-
-	snprintf(buf, sizeof(buf),
-		 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
-	efd = open(buf, O_RDONLY, 0);
-	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
-		goto close_prog;
-	bytes = read(efd, buf, sizeof(buf));
-	close(efd);
-	if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
-		  "bytes %d errno %d\n", bytes, errno))
-		goto close_prog;
-
-	attr.config = strtol(buf, NULL, 0);
-	attr.type = PERF_TYPE_TRACEPOINT;
-	attr.sample_type = PERF_SAMPLE_RAW;
-	attr.sample_period = 1;
-	attr.wakeup_events = 1;
-	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
-			 0 /* cpu 0 */, -1 /* group id */,
-			 0 /* flags */);
-	if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))
-		goto close_pmu;
-
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
-	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
-		  errno))
-		goto close_pmu;
-
-	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
-	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
-		  errno))
-		goto close_pmu;
-
-	/* query (getpid(), pmu_fd) */
-	len = sizeof(buf);
-	err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
-				&fd_type, &probe_offset, &probe_addr);
-	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
-		  errno))
-		goto close_pmu;
-
-	err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
-	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
-		  fd_type, buf))
-		goto close_pmu;
-
-	close(pmu_fd);
-	goto close_prog_noerr;
-
-close_pmu:
-	close(pmu_fd);
-close_prog:
-	error_cnt++;
-close_prog_noerr:
-	bpf_object__close(obj);
-}
-
-static void test_task_fd_query_tp(void)
-{
-	test_task_fd_query_tp_core("sched/sched_switch",
-				   "sched_switch");
-	test_task_fd_query_tp_core("syscalls/sys_enter_read",
-				   "sys_enter_read");
-}
-
 static int libbpf_debug_print(enum libbpf_print_level level,
 			      const char *format, va_list args)
 {
@@ -1423,10 +1000,6 @@ int main(void)
 	test_tcp_estats();
 	test_bpf_obj_id();
 	test_obj_name();
-	test_tp_attach_query();
-	test_get_stack_raw_tp();
-	test_task_fd_query_rawtp();
-	test_task_fd_query_tp();
 	test_reference_tracking();
 	test_queue_stack_map(QUEUE);
 	test_queue_stack_map(STACK);