about summary refs log tree commit diff stats
path: root/tools/testing/selftests/bpf/prog_tests
diff options
context:
space:
mode:
authorStanislav Fomichev <sdf@google.com>2019-03-01 22:42:17 -0500
committerAlexei Starovoitov <ast@kernel.org>2019-03-02 14:10:40 -0500
commit20cb14ff9c49fcb189daf82246fb6fcd3923542c (patch)
treece0ecc585e176a6f4bae1ea851d69f694b248584 /tools/testing/selftests/bpf/prog_tests
parent615741d81de6c16aa466c4eb37805caa868a9bb8 (diff)
selftests: bpf: break up test_progs - tracepoint
Move tracepoint prog tests into separate files.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'tools/testing/selftests/bpf/prog_tests')
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c139
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c78
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c82
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tp_attach_query.c132
4 files changed, 431 insertions, 0 deletions
diff --git a/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
new file mode 100644
index 000000000000..d7bb5beb1c57
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
@@ -0,0 +1,139 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <test_progs.h>
3
/* Number of samples to trigger/consume before the poller callback stops. */
#define MAX_CNT_RAWTP 10ull
/* Maximum number of frames recorded per stack dump. */
#define MAX_STACK_RAWTP 100
/*
 * Layout of one sample delivered over the perf ring buffer.
 * The *_size fields are byte counts (see get_stack_print_output(), which
 * divides them by sizeof(__u64) to get frame counts).
 * NOTE(review): this must stay in sync with the struct emitted by the BPF
 * program in test_get_stack_rawtp.o — confirm there before changing.
 */
struct get_stack_trace_t {
	int pid;
	int kern_stack_size;		/* bytes written to kern_stack */
	int user_stack_size;		/* bytes written to user_stack */
	int user_stack_buildid_size;	/* bytes written to user_stack_buildid */
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
15
/*
 * Ring-buffer callback handed to perf_event_poller() by
 * test_get_stack_raw_tp(): validate one sample produced by the raw
 * tracepoint BPF program.
 *
 * Returns a LIBBPF_PERF_EVENT_* code: ERROR when a stack fails validation,
 * DONE once MAX_CNT_RAWTP samples have been seen, CONT to keep polling.
 */
static int get_stack_print_output(void *data, int size)
{
	bool good_kern_stack = false, good_user_stack = false;
	/* Interpreter entry point; expected on the kernel stack only when
	 * the BPF JIT is disabled.
	 */
	const char *nonjit_func = "___bpf_prog_run";
	struct get_stack_trace_t *e = data;
	int i, num_stack;
	static __u64 cnt;	/* samples seen so far, preserved across calls */
	struct ksym *ks;

	cnt++;

	if (size < sizeof(struct get_stack_trace_t)) {
		/* Short record: treat the payload as a bare array of kernel
		 * addresses.  NOTE(review): assumes the BPF side emits this
		 * shape when no user stack is attached — confirm against
		 * test_get_stack_rawtp.o.
		 */
		__u64 *raw_data = data;
		bool found = false;

		num_stack = size / sizeof(__u64);
		/* If jit is enabled, we do not have a good way to
		 * verify the sanity of the kernel stack. So we
		 * just assume it is good if the stack is not empty.
		 * This could be improved in the future.
		 */
		if (jit_enabled) {
			found = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(raw_data[i]);
				if (strcmp(ks->name, nonjit_func) == 0) {
					found = true;
					break;
				}
			}
		}
		if (found) {
			good_kern_stack = true;
			good_user_stack = true;
		}
	} else {
		/* Full struct: validate the kernel stack, then accept the
		 * user stack if both the plain and build-id dumps are
		 * non-empty.
		 */
		num_stack = e->kern_stack_size / sizeof(__u64);
		if (jit_enabled) {
			good_kern_stack = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(e->kern_stack[i]);
				if (strcmp(ks->name, nonjit_func) == 0) {
					good_kern_stack = true;
					break;
				}
			}
		}
		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
			good_user_stack = true;
	}
	if (!good_kern_stack || !good_user_stack)
		return LIBBPF_PERF_EVENT_ERROR;

	if (cnt == MAX_CNT_RAWTP)
		return LIBBPF_PERF_EVENT_DONE;

	return LIBBPF_PERF_EVENT_CONT;
}
76
77void test_get_stack_raw_tp(void)
78{
79 const char *file = "./test_get_stack_rawtp.o";
80 int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
81 struct perf_event_attr attr = {};
82 struct timespec tv = {0, 10};
83 __u32 key = 0, duration = 0;
84 struct bpf_object *obj;
85
86 err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
87 if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
88 return;
89
90 efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
91 if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
92 goto close_prog;
93
94 perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
95 if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
96 perfmap_fd, errno))
97 goto close_prog;
98
99 err = load_kallsyms();
100 if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
101 goto close_prog;
102
103 attr.sample_type = PERF_SAMPLE_RAW;
104 attr.type = PERF_TYPE_SOFTWARE;
105 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
106 pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
107 -1/*group_fd*/, 0);
108 if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
109 errno))
110 goto close_prog;
111
112 err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
113 if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
114 errno))
115 goto close_prog;
116
117 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
118 if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
119 err, errno))
120 goto close_prog;
121
122 err = perf_event_mmap(pmu_fd);
123 if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
124 goto close_prog;
125
126 /* trigger some syscall action */
127 for (i = 0; i < MAX_CNT_RAWTP; i++)
128 nanosleep(&tv, NULL);
129
130 err = perf_event_poller(pmu_fd, get_stack_print_output);
131 if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
132 goto close_prog;
133
134 goto close_prog_noerr;
135close_prog:
136 error_cnt++;
137close_prog_noerr:
138 bpf_object__close(obj);
139}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
new file mode 100644
index 000000000000..958a3d88de99
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
@@ -0,0 +1,78 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <test_progs.h>
3
4void test_task_fd_query_rawtp(void)
5{
6 const char *file = "./test_get_stack_rawtp.o";
7 __u64 probe_offset, probe_addr;
8 __u32 len, prog_id, fd_type;
9 struct bpf_object *obj;
10 int efd, err, prog_fd;
11 __u32 duration = 0;
12 char buf[256];
13
14 err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
15 if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
16 return;
17
18 efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
19 if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
20 goto close_prog;
21
22 /* query (getpid(), efd) */
23 len = sizeof(buf);
24 err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
25 &fd_type, &probe_offset, &probe_addr);
26 if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
27 errno))
28 goto close_prog;
29
30 err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
31 strcmp(buf, "sys_enter") == 0;
32 if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
33 fd_type, buf))
34 goto close_prog;
35
36 /* test zero len */
37 len = 0;
38 err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
39 &fd_type, &probe_offset, &probe_addr);
40 if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
41 err, errno))
42 goto close_prog;
43 err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
44 len == strlen("sys_enter");
45 if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
46 goto close_prog;
47
48 /* test empty buffer */
49 len = sizeof(buf);
50 err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
51 &fd_type, &probe_offset, &probe_addr);
52 if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
53 err, errno))
54 goto close_prog;
55 err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
56 len == strlen("sys_enter");
57 if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
58 goto close_prog;
59
60 /* test smaller buffer */
61 len = 3;
62 err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
63 &fd_type, &probe_offset, &probe_addr);
64 if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
65 "err %d errno %d\n", err, errno))
66 goto close_prog;
67 err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
68 len == strlen("sys_enter") &&
69 strcmp(buf, "sy") == 0;
70 if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
71 goto close_prog;
72
73 goto close_prog_noerr;
74close_prog:
75 error_cnt++;
76close_prog_noerr:
77 bpf_object__close(obj);
78}
diff --git a/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
new file mode 100644
index 000000000000..d636a4f39476
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
@@ -0,0 +1,82 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <test_progs.h>
3
4static void test_task_fd_query_tp_core(const char *probe_name,
5 const char *tp_name)
6{
7 const char *file = "./test_tracepoint.o";
8 int err, bytes, efd, prog_fd, pmu_fd;
9 struct perf_event_attr attr = {};
10 __u64 probe_offset, probe_addr;
11 __u32 len, prog_id, fd_type;
12 struct bpf_object *obj;
13 __u32 duration = 0;
14 char buf[256];
15
16 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
17 if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
18 goto close_prog;
19
20 snprintf(buf, sizeof(buf),
21 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
22 efd = open(buf, O_RDONLY, 0);
23 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
24 goto close_prog;
25 bytes = read(efd, buf, sizeof(buf));
26 close(efd);
27 if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
28 "bytes %d errno %d\n", bytes, errno))
29 goto close_prog;
30
31 attr.config = strtol(buf, NULL, 0);
32 attr.type = PERF_TYPE_TRACEPOINT;
33 attr.sample_type = PERF_SAMPLE_RAW;
34 attr.sample_period = 1;
35 attr.wakeup_events = 1;
36 pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
37 0 /* cpu 0 */, -1 /* group id */,
38 0 /* flags */);
39 if (CHECK(err, "perf_event_open", "err %d errno %d\n", err, errno))
40 goto close_pmu;
41
42 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
43 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
44 errno))
45 goto close_pmu;
46
47 err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
48 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
49 errno))
50 goto close_pmu;
51
52 /* query (getpid(), pmu_fd) */
53 len = sizeof(buf);
54 err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
55 &fd_type, &probe_offset, &probe_addr);
56 if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
57 errno))
58 goto close_pmu;
59
60 err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
61 if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
62 fd_type, buf))
63 goto close_pmu;
64
65 close(pmu_fd);
66 goto close_prog_noerr;
67
68close_pmu:
69 close(pmu_fd);
70close_prog:
71 error_cnt++;
72close_prog_noerr:
73 bpf_object__close(obj);
74}
75
/* Run the task_fd_query tracepoint check for both a regular ("sched")
 * and a "syscalls" tracepoint.
 */
void test_task_fd_query_tp(void)
{
	static const struct {
		const char *probe;	/* path under tracefs events/ */
		const char *tp;		/* name bpf_task_fd_query must report */
	} cases[] = {
		{ "sched/sched_switch", "sched_switch" },
		{ "syscalls/sys_enter_read", "sys_enter_read" },
	};
	int i;

	for (i = 0; i < (int)(sizeof(cases) / sizeof(cases[0])); i++)
		test_task_fd_query_tp_core(cases[i].probe, cases[i].tp);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
new file mode 100644
index 000000000000..a2f476f91637
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tp_attach_query.c
@@ -0,0 +1,132 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <test_progs.h>
3
4void test_tp_attach_query(void)
5{
6 const int num_progs = 3;
7 int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
8 __u32 duration = 0, info_len, saved_prog_ids[num_progs];
9 const char *file = "./test_tracepoint.o";
10 struct perf_event_query_bpf *query;
11 struct perf_event_attr attr = {};
12 struct bpf_object *obj[num_progs];
13 struct bpf_prog_info prog_info;
14 char buf[256];
15
16 snprintf(buf, sizeof(buf),
17 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
18 efd = open(buf, O_RDONLY, 0);
19 if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
20 return;
21 bytes = read(efd, buf, sizeof(buf));
22 close(efd);
23 if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
24 "read", "bytes %d errno %d\n", bytes, errno))
25 return;
26
27 attr.config = strtol(buf, NULL, 0);
28 attr.type = PERF_TYPE_TRACEPOINT;
29 attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
30 attr.sample_period = 1;
31 attr.wakeup_events = 1;
32
33 query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
34 for (i = 0; i < num_progs; i++) {
35 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
36 &prog_fd[i]);
37 if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
38 goto cleanup1;
39
40 bzero(&prog_info, sizeof(prog_info));
41 prog_info.jited_prog_len = 0;
42 prog_info.xlated_prog_len = 0;
43 prog_info.nr_map_ids = 0;
44 info_len = sizeof(prog_info);
45 err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
46 if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
47 err, errno))
48 goto cleanup1;
49 saved_prog_ids[i] = prog_info.id;
50
51 pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
52 0 /* cpu 0 */, -1 /* group id */,
53 0 /* flags */);
54 if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
55 pmu_fd[i], errno))
56 goto cleanup2;
57 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
58 if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
59 err, errno))
60 goto cleanup3;
61
62 if (i == 0) {
63 /* check NULL prog array query */
64 query->ids_len = num_progs;
65 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
66 if (CHECK(err || query->prog_cnt != 0,
67 "perf_event_ioc_query_bpf",
68 "err %d errno %d query->prog_cnt %u\n",
69 err, errno, query->prog_cnt))
70 goto cleanup3;
71 }
72
73 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
74 if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
75 err, errno))
76 goto cleanup3;
77
78 if (i == 1) {
79 /* try to get # of programs only */
80 query->ids_len = 0;
81 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
82 if (CHECK(err || query->prog_cnt != 2,
83 "perf_event_ioc_query_bpf",
84 "err %d errno %d query->prog_cnt %u\n",
85 err, errno, query->prog_cnt))
86 goto cleanup3;
87
88 /* try a few negative tests */
89 /* invalid query pointer */
90 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
91 (struct perf_event_query_bpf *)0x1);
92 if (CHECK(!err || errno != EFAULT,
93 "perf_event_ioc_query_bpf",
94 "err %d errno %d\n", err, errno))
95 goto cleanup3;
96
97 /* no enough space */
98 query->ids_len = 1;
99 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
100 if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
101 "perf_event_ioc_query_bpf",
102 "err %d errno %d query->prog_cnt %u\n",
103 err, errno, query->prog_cnt))
104 goto cleanup3;
105 }
106
107 query->ids_len = num_progs;
108 err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
109 if (CHECK(err || query->prog_cnt != (i + 1),
110 "perf_event_ioc_query_bpf",
111 "err %d errno %d query->prog_cnt %u\n",
112 err, errno, query->prog_cnt))
113 goto cleanup3;
114 for (j = 0; j < i + 1; j++)
115 if (CHECK(saved_prog_ids[j] != query->ids[j],
116 "perf_event_ioc_query_bpf",
117 "#%d saved_prog_id %x query prog_id %x\n",
118 j, saved_prog_ids[j], query->ids[j]))
119 goto cleanup3;
120 }
121
122 i = num_progs - 1;
123 for (; i >= 0; i--) {
124 cleanup3:
125 ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
126 cleanup2:
127 close(pmu_fd[i]);
128 cleanup1:
129 bpf_object__close(obj[i]);
130 }
131 free(query);
132}