about summary refs log tree commit diff stats
path: root/tools/testing/selftests/bpf/test_progs.c
diff options
context:
space:
mode:
Diffstat (limited to 'tools/testing/selftests/bpf/test_progs.c')
-rw-r--r-- tools/testing/selftests/bpf/test_progs.c | 242
1 file changed, 225 insertions(+), 17 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index fac581f1c57f..ed197eef1cfc 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -38,8 +38,10 @@ typedef __u16 __sum16;
38#include "bpf_util.h" 38#include "bpf_util.h"
39#include "bpf_endian.h" 39#include "bpf_endian.h"
40#include "bpf_rlimit.h" 40#include "bpf_rlimit.h"
41#include "trace_helpers.h"
41 42
42static int error_cnt, pass_cnt; 43static int error_cnt, pass_cnt;
44static bool jit_enabled;
43 45
44#define MAGIC_BYTES 123 46#define MAGIC_BYTES 123
45 47
@@ -391,13 +393,30 @@ static inline __u64 ptr_to_u64(const void *ptr)
391 return (__u64) (unsigned long) ptr; 393 return (__u64) (unsigned long) ptr;
392} 394}
393 395
/* Report whether the kernel's BPF JIT is enabled.
 *
 * Reads the first byte of /proc/sys/net/core/bpf_jit_enable; any value
 * other than '0' counts as enabled.  Returns false when the sysctl is
 * missing or unreadable (e.g. CONFIG_BPF_JIT=n), so callers fall back
 * to interpreter-mode expectations.
 */
static bool is_jit_enabled(void)
{
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	bool enabled = false;
	int sysctl_fd;

	/* Fix: open(2) takes flags as its second argument.  The original
	 * passed 0 as flags and O_RDONLY as a bogus mode argument; it only
	 * behaved because O_RDONLY happens to be 0.
	 */
	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	return enabled;
}
413
394static void test_bpf_obj_id(void) 414static void test_bpf_obj_id(void)
395{ 415{
396 const __u64 array_magic_value = 0xfaceb00c; 416 const __u64 array_magic_value = 0xfaceb00c;
397 const __u32 array_key = 0; 417 const __u32 array_key = 0;
398 const int nr_iters = 2; 418 const int nr_iters = 2;
399 const char *file = "./test_obj_id.o"; 419 const char *file = "./test_obj_id.o";
400 const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
401 const char *expected_prog_name = "test_obj_id"; 420 const char *expected_prog_name = "test_obj_id";
402 const char *expected_map_name = "test_map_id"; 421 const char *expected_map_name = "test_map_id";
403 const __u64 nsec_per_sec = 1000000000; 422 const __u64 nsec_per_sec = 1000000000;
@@ -414,20 +433,11 @@ static void test_bpf_obj_id(void)
414 char jited_insns[128], xlated_insns[128], zeros[128]; 433 char jited_insns[128], xlated_insns[128], zeros[128];
415 __u32 i, next_id, info_len, nr_id_found, duration = 0; 434 __u32 i, next_id, info_len, nr_id_found, duration = 0;
416 struct timespec real_time_ts, boot_time_ts; 435 struct timespec real_time_ts, boot_time_ts;
417 int sysctl_fd, jit_enabled = 0, err = 0; 436 int err = 0;
418 __u64 array_value; 437 __u64 array_value;
419 uid_t my_uid = getuid(); 438 uid_t my_uid = getuid();
420 time_t now, load_time; 439 time_t now, load_time;
421 440
422 sysctl_fd = open(jit_sysctl, 0, O_RDONLY);
423 if (sysctl_fd != -1) {
424 char tmpc;
425
426 if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
427 jit_enabled = (tmpc != '0');
428 close(sysctl_fd);
429 }
430
431 err = bpf_prog_get_fd_by_id(0); 441 err = bpf_prog_get_fd_by_id(0);
432 CHECK(err >= 0 || errno != ENOENT, 442 CHECK(err >= 0 || errno != ENOENT,
433 "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno); 443 "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
@@ -896,11 +906,47 @@ static int compare_map_keys(int map1_fd, int map2_fd)
896 return 0; 906 return 0;
897} 907}
898 908
909static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
910{
911 __u32 key, next_key, *cur_key_p, *next_key_p;
912 char *val_buf1, *val_buf2;
913 int i, err = 0;
914
915 val_buf1 = malloc(stack_trace_len);
916 val_buf2 = malloc(stack_trace_len);
917 cur_key_p = NULL;
918 next_key_p = &key;
919 while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
920 err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
921 if (err)
922 goto out;
923 err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
924 if (err)
925 goto out;
926 for (i = 0; i < stack_trace_len; i++) {
927 if (val_buf1[i] != val_buf2[i]) {
928 err = -1;
929 goto out;
930 }
931 }
932 key = *next_key_p;
933 cur_key_p = &key;
934 next_key_p = &next_key;
935 }
936 if (errno != ENOENT)
937 err = -1;
938
939out:
940 free(val_buf1);
941 free(val_buf2);
942 return err;
943}
944
899static void test_stacktrace_map() 945static void test_stacktrace_map()
900{ 946{
901 int control_map_fd, stackid_hmap_fd, stackmap_fd; 947 int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
902 const char *file = "./test_stacktrace_map.o"; 948 const char *file = "./test_stacktrace_map.o";
903 int bytes, efd, err, pmu_fd, prog_fd; 949 int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
904 struct perf_event_attr attr = {}; 950 struct perf_event_attr attr = {};
905 __u32 key, val, duration = 0; 951 __u32 key, val, duration = 0;
906 struct bpf_object *obj; 952 struct bpf_object *obj;
@@ -956,6 +1002,10 @@ static void test_stacktrace_map()
956 if (stackmap_fd < 0) 1002 if (stackmap_fd < 0)
957 goto disable_pmu; 1003 goto disable_pmu;
958 1004
1005 stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
1006 if (stack_amap_fd < 0)
1007 goto disable_pmu;
1008
959 /* give some time for bpf program run */ 1009 /* give some time for bpf program run */
960 sleep(1); 1010 sleep(1);
961 1011
@@ -977,6 +1027,12 @@ static void test_stacktrace_map()
977 "err %d errno %d\n", err, errno)) 1027 "err %d errno %d\n", err, errno))
978 goto disable_pmu_noerr; 1028 goto disable_pmu_noerr;
979 1029
1030 stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
1031 err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
1032 if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
1033 "err %d errno %d\n", err, errno))
1034 goto disable_pmu_noerr;
1035
980 goto disable_pmu_noerr; 1036 goto disable_pmu_noerr;
981disable_pmu: 1037disable_pmu:
982 error_cnt++; 1038 error_cnt++;
@@ -1070,9 +1126,9 @@ err:
1070 1126
1071static void test_stacktrace_build_id(void) 1127static void test_stacktrace_build_id(void)
1072{ 1128{
1073 int control_map_fd, stackid_hmap_fd, stackmap_fd; 1129 int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
1074 const char *file = "./test_stacktrace_build_id.o"; 1130 const char *file = "./test_stacktrace_build_id.o";
1075 int bytes, efd, err, pmu_fd, prog_fd; 1131 int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
1076 struct perf_event_attr attr = {}; 1132 struct perf_event_attr attr = {};
1077 __u32 key, previous_key, val, duration = 0; 1133 __u32 key, previous_key, val, duration = 0;
1078 struct bpf_object *obj; 1134 struct bpf_object *obj;
@@ -1137,6 +1193,11 @@ static void test_stacktrace_build_id(void)
1137 err, errno)) 1193 err, errno))
1138 goto disable_pmu; 1194 goto disable_pmu;
1139 1195
1196 stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
1197 if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
1198 "err %d errno %d\n", err, errno))
1199 goto disable_pmu;
1200
1140 assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") 1201 assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
1141 == 0); 1202 == 0);
1142 assert(system("./urandom_read") == 0); 1203 assert(system("./urandom_read") == 0);
@@ -1188,8 +1249,15 @@ static void test_stacktrace_build_id(void)
1188 previous_key = key; 1249 previous_key = key;
1189 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); 1250 } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
1190 1251
1191 CHECK(build_id_matches < 1, "build id match", 1252 if (CHECK(build_id_matches < 1, "build id match",
1192 "Didn't find expected build ID from the map\n"); 1253 "Didn't find expected build ID from the map\n"))
1254 goto disable_pmu;
1255
1256 stack_trace_len = PERF_MAX_STACK_DEPTH
1257 * sizeof(struct bpf_stack_build_id);
1258 err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
1259 CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
1260 "err %d errno %d\n", err, errno);
1193 1261
1194disable_pmu: 1262disable_pmu:
1195 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); 1263 ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
@@ -1204,8 +1272,147 @@ out:
1204 return; 1272 return;
1205} 1273}
1206 1274
/* Number of samples to collect before the poller reports DONE. */
#define MAX_CNT_RAWTP 10ull
/* Maximum stack depth (in entries) captured per sample. */
#define MAX_STACK_RAWTP 100

/* Layout of one perf sample emitted by test_get_stack_rawtp.o.
 * NOTE(review): this must stay byte-compatible with the struct the BPF
 * program writes into the perf buffer — do not reorder or resize fields
 * without updating the BPF side.  The *_size fields are byte counts of
 * the portion of each stack array actually filled in.
 */
struct get_stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
1286
1287static int get_stack_print_output(void *data, int size)
1288{
1289 bool good_kern_stack = false, good_user_stack = false;
1290 const char *nonjit_func = "___bpf_prog_run";
1291 struct get_stack_trace_t *e = data;
1292 int i, num_stack;
1293 static __u64 cnt;
1294 struct ksym *ks;
1295
1296 cnt++;
1297
1298 if (size < sizeof(struct get_stack_trace_t)) {
1299 __u64 *raw_data = data;
1300 bool found = false;
1301
1302 num_stack = size / sizeof(__u64);
1303 /* If jit is enabled, we do not have a good way to
1304 * verify the sanity of the kernel stack. So we
1305 * just assume it is good if the stack is not empty.
1306 * This could be improved in the future.
1307 */
1308 if (jit_enabled) {
1309 found = num_stack > 0;
1310 } else {
1311 for (i = 0; i < num_stack; i++) {
1312 ks = ksym_search(raw_data[i]);
1313 if (strcmp(ks->name, nonjit_func) == 0) {
1314 found = true;
1315 break;
1316 }
1317 }
1318 }
1319 if (found) {
1320 good_kern_stack = true;
1321 good_user_stack = true;
1322 }
1323 } else {
1324 num_stack = e->kern_stack_size / sizeof(__u64);
1325 if (jit_enabled) {
1326 good_kern_stack = num_stack > 0;
1327 } else {
1328 for (i = 0; i < num_stack; i++) {
1329 ks = ksym_search(e->kern_stack[i]);
1330 if (strcmp(ks->name, nonjit_func) == 0) {
1331 good_kern_stack = true;
1332 break;
1333 }
1334 }
1335 }
1336 if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
1337 good_user_stack = true;
1338 }
1339 if (!good_kern_stack || !good_user_stack)
1340 return PERF_EVENT_ERROR;
1341
1342 if (cnt == MAX_CNT_RAWTP)
1343 return PERF_EVENT_DONE;
1344
1345 return PERF_EVENT_CONT;
1346}
1347
1348static void test_get_stack_raw_tp(void)
1349{
1350 const char *file = "./test_get_stack_rawtp.o";
1351 int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
1352 struct perf_event_attr attr = {};
1353 struct timespec tv = {0, 10};
1354 __u32 key = 0, duration = 0;
1355 struct bpf_object *obj;
1356
1357 err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
1358 if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
1359 return;
1360
1361 efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
1362 if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
1363 goto close_prog;
1364
1365 perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
1366 if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
1367 perfmap_fd, errno))
1368 goto close_prog;
1369
1370 err = load_kallsyms();
1371 if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
1372 goto close_prog;
1373
1374 attr.sample_type = PERF_SAMPLE_RAW;
1375 attr.type = PERF_TYPE_SOFTWARE;
1376 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
1377 pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
1378 -1/*group_fd*/, 0);
1379 if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
1380 errno))
1381 goto close_prog;
1382
1383 err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
1384 if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
1385 errno))
1386 goto close_prog;
1387
1388 err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
1389 if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
1390 err, errno))
1391 goto close_prog;
1392
1393 err = perf_event_mmap(pmu_fd);
1394 if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
1395 goto close_prog;
1396
1397 /* trigger some syscall action */
1398 for (i = 0; i < MAX_CNT_RAWTP; i++)
1399 nanosleep(&tv, NULL);
1400
1401 err = perf_event_poller(pmu_fd, get_stack_print_output);
1402 if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
1403 goto close_prog;
1404
1405 goto close_prog_noerr;
1406close_prog:
1407 error_cnt++;
1408close_prog_noerr:
1409 bpf_object__close(obj);
1410}
1411
1207int main(void) 1412int main(void)
1208{ 1413{
1414 jit_enabled = is_jit_enabled();
1415
1209 test_pkt_access(); 1416 test_pkt_access();
1210 test_xdp(); 1417 test_xdp();
1211 test_xdp_adjust_tail(); 1418 test_xdp_adjust_tail();
@@ -1219,6 +1426,7 @@ int main(void)
1219 test_stacktrace_map(); 1426 test_stacktrace_map();
1220 test_stacktrace_build_id(); 1427 test_stacktrace_build_id();
1221 test_stacktrace_map_raw_tp(); 1428 test_stacktrace_map_raw_tp();
1429 test_get_stack_raw_tp();
1222 1430
1223 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); 1431 printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
1224 return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS; 1432 return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;