-rw-r--r--  tools/perf/util/bpf-loader.c  138
1 file changed, 131 insertions, 7 deletions
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 190a1c7f0649..36544e5ece43 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -38,6 +38,8 @@ struct bpf_prog_priv {
         struct perf_probe_event pev;
         bool need_prologue;
         struct bpf_insn *insns_buf;
+        int nr_types;
+        int *type_mapping;
 };
 
 static bool libbpf_initialized;
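The two fields added to bpf_prog_priv carry the result of the prologue deduplication introduced further down: type_mapping has one slot per tev and records which prologue type that tev was assigned, while nr_types holds the number of distinct types.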
@@ -113,6 +115,7 @@ bpf_prog_priv__clear(struct bpf_program *prog __maybe_unused,
 
         cleanup_perf_probe_events(&priv->pev, 1);
         zfree(&priv->insns_buf);
+        zfree(&priv->type_mapping);
         free(priv);
 }
 
@@ -381,7 +384,7 @@ preproc_gen_prologue(struct bpf_program *prog, int n,
         struct bpf_prog_priv *priv;
         struct bpf_insn *buf;
         size_t prologue_cnt = 0;
-        int err;
+        int i, err;
 
         err = bpf_program__get_private(prog, (void **)&priv);
         if (err || !priv)
@@ -389,10 +392,21 @@ preproc_gen_prologue(struct bpf_program *prog, int n,
 
         pev = &priv->pev;
 
-        if (n < 0 || n >= pev->ntevs)
+        if (n < 0 || n >= priv->nr_types)
                 goto errout;
 
-        tev = &pev->tevs[n];
+        /* Find a tev that belongs to that type */
+        for (i = 0; i < pev->ntevs; i++) {
+                if (priv->type_mapping[i] == n)
+                        break;
+        }
+
+        if (i >= pev->ntevs) {
+                pr_debug("Internal error: prologue type %d not found\n", n);
+                return -BPF_LOADER_ERRNO__PROLOGUE;
+        }
+
+        tev = &pev->tevs[i];
 
         buf = priv->insns_buf;
         err = bpf__gen_prologue(tev->args, tev->nargs,
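With this change the prologue preprocessor runs once per prologue type rather than once per tev: n now ranges over priv->nr_types, and the new loop picks any tev mapped to that type as its representative. Any match will do, because tevs that share a type compare equal under compare_tev_args() below and therefore need the same prologue.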
@@ -423,6 +437,101 @@ errout:
         return -BPF_LOADER_ERRNO__PROLOGUE;
 }
 
+/*
+ * compare_tev_args is reflexive, transitive and antisymmetric.
+ * I can prove it, but this margin is too narrow to contain the proof.
+ */
+static int compare_tev_args(const void *ptev1, const void *ptev2)
+{
+        int i, ret;
+        const struct probe_trace_event *tev1 =
+                *(const struct probe_trace_event **)ptev1;
+        const struct probe_trace_event *tev2 =
+                *(const struct probe_trace_event **)ptev2;
+
+        ret = tev2->nargs - tev1->nargs;
+        if (ret)
+                return ret;
+
+        for (i = 0; i < tev1->nargs; i++) {
+                struct probe_trace_arg *arg1, *arg2;
+                struct probe_trace_arg_ref *ref1, *ref2;
+
+                arg1 = &tev1->args[i];
+                arg2 = &tev2->args[i];
+
+                ret = strcmp(arg1->value, arg2->value);
+                if (ret)
+                        return ret;
+
+                ref1 = arg1->ref;
+                ref2 = arg2->ref;
+
+                while (ref1 && ref2) {
+                        ret = ref2->offset - ref1->offset;
+                        if (ret)
+                                return ret;
+
+                        ref1 = ref1->next;
+                        ref2 = ref2->next;
+                }
+
+                if (ref1 || ref2)
+                        return ref2 ? 1 : -1;
+        }
+
+        return 0;
+}
+
+/*
+ * Assign a type number to each tev in a pev.
+ * mapping is an array with the same number of slots as tevs in that pev.
+ * nr_types will be set to the number of types.
+ */
+static int map_prologue(struct perf_probe_event *pev, int *mapping,
+                        int *nr_types)
+{
+        int i, type = 0;
+        struct probe_trace_event **ptevs;
+
+        size_t array_sz = sizeof(*ptevs) * pev->ntevs;
+
+        ptevs = malloc(array_sz);
+        if (!ptevs) {
+                pr_debug("Not enough memory: alloc ptevs failed\n");
+                return -ENOMEM;
+        }
+
+        pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
+        for (i = 0; i < pev->ntevs; i++)
+                ptevs[i] = &pev->tevs[i];
+
+        qsort(ptevs, pev->ntevs, sizeof(*ptevs),
+              compare_tev_args);
+
+        for (i = 0; i < pev->ntevs; i++) {
+                int n;
+
+                n = ptevs[i] - pev->tevs;
+                if (i == 0) {
+                        mapping[n] = type;
+                        pr_debug("mapping[%d]=%d\n", n, type);
+                        continue;
+                }
+
+                if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
+                        mapping[n] = type;
+                else
+                        mapping[n] = ++type;
+
+                pr_debug("mapping[%d]=%d\n", n, mapping[n]);
+        }
+        free(ptevs);
+        *nr_types = type + 1;
+
+        return 0;
+}
+
 static int hook_load_preprocessor(struct bpf_program *prog)
 {
         struct perf_probe_event *pev;
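The two helpers above implement the deduplication itself: pointers to the tevs are sorted with qsort(), and runs of elements that compare equal receive the same type number. A minimal standalone sketch of that numbering scheme, using a made-up fake_tev struct and cmp_fake_tev() comparator as stand-ins for probe_trace_event and compare_tev_args(), might look like this:

/*
 * Standalone sketch, not part of the patch: sort an array of pointers,
 * then give neighbouring elements that compare equal the same type number.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_tev {
        const char *arg;        /* stand-in for the real argument list */
};

static int cmp_fake_tev(const void *a, const void *b)
{
        const struct fake_tev *t1 = *(const struct fake_tev * const *)a;
        const struct fake_tev *t2 = *(const struct fake_tev * const *)b;

        return strcmp(t1->arg, t2->arg);
}

int main(void)
{
        struct fake_tev tevs[] = {
                { "%di" }, { "%si" }, { "%di" }, { "%dx" },
        };
        int ntevs = sizeof(tevs) / sizeof(tevs[0]);
        const struct fake_tev *ptevs[4];
        int mapping[4], i, type = 0;

        for (i = 0; i < ntevs; i++)
                ptevs[i] = &tevs[i];

        qsort(ptevs, ntevs, sizeof(*ptevs), cmp_fake_tev);

        /* neighbours that compare equal share one type number */
        for (i = 0; i < ntevs; i++) {
                int n = ptevs[i] - tevs;        /* original index */

                if (i && cmp_fake_tev(&ptevs[i], &ptevs[i - 1]))
                        type++;
                mapping[n] = type;
        }

        for (i = 0; i < ntevs; i++)
                printf("mapping[%d] = %d\n", i, mapping[i]);
        /* nr_types would be type + 1, i.e. 3 here */
        return 0;
}

With the four stand-in events above this prints mapping[0] = 0, mapping[1] = 2, mapping[2] = 0, mapping[3] = 1, i.e. three types for four events, which is the same shape of result map_prologue() hands to bpf_program__set_prep().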
@@ -462,7 +571,19 @@ static int hook_load_preprocessor(struct bpf_program *prog)
                 return -ENOMEM;
         }
 
-        err = bpf_program__set_prep(prog, pev->ntevs,
+        priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
+        if (!priv->type_mapping) {
+                pr_debug("Not enough memory: alloc type_mapping failed\n");
+                return -ENOMEM;
+        }
+        memset(priv->type_mapping, -1,
+               sizeof(int) * pev->ntevs);
+
+        err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
+        if (err)
+                return err;
+
+        err = bpf_program__set_prep(prog, priv->nr_types,
                                     preproc_gen_prologue);
         return err;
 }
@@ -596,10 +717,13 @@ int bpf__foreach_tev(struct bpf_object *obj,
         for (i = 0; i < pev->ntevs; i++) {
                 tev = &pev->tevs[i];
 
-                if (priv->need_prologue)
-                        fd = bpf_program__nth_fd(prog, i);
-                else
+                if (priv->need_prologue) {
+                        int type = priv->type_mapping[i];
+
+                        fd = bpf_program__nth_fd(prog, type);
+                } else {
                         fd = bpf_program__fd(prog);
+                }
 
                 if (fd < 0) {
                         pr_debug("bpf: failed to get file descriptor\n");
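As a hypothetical illustration (the numbers are not from the patch): if a pev expands to three tevs and tevs[0] and tevs[2] compare equal under compare_tev_args(), map_prologue() yields type_mapping = {0, 1, 0} and nr_types = 2, bpf_program__set_prep() builds just two program instances, and the loop above passes instance 0's fd to the callback for tevs[0] and tevs[2] and instance 1's fd for tevs[1].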