author		Wang Nan <wangnan0@huawei.com>			2016-04-08 11:07:24 -0400
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2016-04-11 21:17:45 -0400
commit		d78885739a7df111dc7b081f8a09e08a5fcfecc2 (patch)
tree		b1bf3df9c925992b7e2192835d62c3dedfaa7e9d /tools
parent		f9383452a26fc47f62c4ddcfa20ccebb7a09c2d8 (diff)
perf bpf: Clone bpf stdout events in multiple bpf scripts
This patch allows cloning the bpf-output event configuration among multiple
bpf scripts. If a map named '__bpf_stdout__' exists but has not been
configured using 'map:__bpf_stdout__.event=', this patch clones the
configuration from another '__bpf_stdout__' map. For example, the following
command:
# perf trace --ev bpf-output/no-inherit,name=evt/ \
--ev ./test_bpf_trace.c/map:__bpf_stdout__.event=evt/ \
--ev ./test_bpf_trace2.c usleep 100000
is equivalent to:
# perf trace --ev bpf-output/no-inherit,name=evt/ \
--ev ./test_bpf_trace.c/map:__bpf_stdout__.event=evt/ \
--ev ./test_bpf_trace2.c/map:__bpf_stdout__.event=evt/ \
usleep 100000
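
For illustration only, here is a minimal sketch of what such a script could
look like. This is not the test_bpf_trace.c used above; the probed function,
helper/constant definitions and the reliance on __NR_CPUS__ and
LINUX_VERSION_CODE being supplied by perf's clang invocation are assumptions
modelled on the perf BPF script examples, not something this patch defines:

	/* Hypothetical sketch of a bpf script writing to __bpf_stdout__;
	 * not the actual test_bpf_trace.c referenced above. */
	#define SEC(NAME) __attribute__((section(NAME), used))

	/* Assumed UAPI constants; values are from the kernel's bpf.h enums */
	#define BPF_MAP_TYPE_PERF_EVENT_ARRAY	4
	#define BPF_FUNC_perf_event_output	25
	#define BPF_FUNC_get_smp_processor_id	8

	struct bpf_map_def {
		unsigned int type;
		unsigned int key_size;
		unsigned int value_size;
		unsigned int max_entries;
	};

	/* perf clones the bpf-output event configuration onto this map */
	struct bpf_map_def SEC("maps") __bpf_stdout__ = {
		.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
		.key_size = sizeof(int),
		.value_size = sizeof(unsigned int),
		.max_entries = __NR_CPUS__,
	};

	static int (*perf_event_output)(void *, struct bpf_map_def *,
					unsigned long, void *,
					unsigned long) =
		(void *)BPF_FUNC_perf_event_output;
	static int (*get_smp_processor_id)(void) =
		(void *)BPF_FUNC_get_smp_processor_id;

	SEC("func=sys_nanosleep")	/* probe point is an assumption */
	int bpf_func__sys_nanosleep(void *ctx)
	{
		char msg[] = "Raise a BPF event!\n";

		/* Emit the message through the __bpf_stdout__ map on this CPU */
		perf_event_output(ctx, &__bpf_stdout__, get_smp_processor_id(),
				  &msg, sizeof(msg));
		return 0;
	}

	char _license[] SEC("license") = "GPL";
	int _version SEC("version") = LINUX_VERSION_CODE;

With two such scripts on the command line, only the first needs the explicit
'map:__bpf_stdout__.event=evt/' setting; bpf__setup_stdout() copies that
configuration onto the '__bpf_stdout__' map of every other script.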
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Suggested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1460128045-97310-4-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools')
 tools/perf/builtin-record.c  |   8
 tools/perf/builtin-trace.c   |   7
 tools/perf/util/bpf-loader.c | 124
 tools/perf/util/bpf-loader.h |  19
 4 files changed, 158 insertions(+), 0 deletions(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 410035c6e300..e64bd1ee5acb 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1276,6 +1276,14 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
 	if (err)
 		return err;
 
+	err = bpf__setup_stdout(rec->evlist);
+	if (err) {
+		bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
+		pr_err("ERROR: Setup BPF stdout failed: %s\n",
+		       errbuf);
+		return err;
+	}
+
 	err = -ENOMEM;
 
 	symbol__init(NULL);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 11290b57ce04..27d987030627 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -3273,6 +3273,13 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
 	argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
 					trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
 
+	err = bpf__setup_stdout(trace.evlist);
+	if (err) {
+		bpf__strerror_setup_stdout(trace.evlist, err, bf, sizeof(bf));
+		pr_err("ERROR: Setup BPF stdout failed: %s\n", bf);
+		goto out;
+	}
+
 	if (trace.trace_pgfaults) {
 		trace.opts.sample_address = true;
 		trace.opts.sample_time = true;
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 0967ce601931..67f61a902a08 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -842,6 +842,58 @@ bpf_map_op__new(struct parse_events_term *term)
 	return op;
 }
 
+static struct bpf_map_op *
+bpf_map_op__clone(struct bpf_map_op *op)
+{
+	struct bpf_map_op *newop;
+
+	newop = memdup(op, sizeof(*op));
+	if (!newop) {
+		pr_debug("Failed to alloc bpf_map_op\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&newop->list);
+	if (op->key_type == BPF_MAP_KEY_RANGES) {
+		size_t memsz = op->k.array.nr_ranges *
+			       sizeof(op->k.array.ranges[0]);
+
+		newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
+		if (!newop->k.array.ranges) {
+			pr_debug("Failed to alloc indices for map\n");
+			free(newop);
+			return NULL;
+		}
+	}
+
+	return newop;
+}
+
+static struct bpf_map_priv *
+bpf_map_priv__clone(struct bpf_map_priv *priv)
+{
+	struct bpf_map_priv *newpriv;
+	struct bpf_map_op *pos, *newop;
+
+	newpriv = zalloc(sizeof(*newpriv));
+	if (!newpriv) {
+		pr_debug("No enough memory to alloc map private\n");
+		return NULL;
+	}
+	INIT_LIST_HEAD(&newpriv->ops_list);
+
+	list_for_each_entry(pos, &priv->ops_list, list) {
+		newop = bpf_map_op__clone(pos);
+		if (!newop) {
+			bpf_map_priv__purge(newpriv);
+			return NULL;
+		}
+		list_add_tail(&newop->list, &newpriv->ops_list);
+	}
+
+	return newpriv;
+}
+
 static int
 bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
 {
@@ -1417,6 +1469,70 @@ int bpf__apply_obj_config(void)
 	return 0;
 }
 
+#define bpf__for_each_map(pos, obj, objtmp)	\
+	bpf_object__for_each_safe(obj, objtmp)	\
+		bpf_map__for_each(pos, obj)
+
+#define bpf__for_each_stdout_map(pos, obj, objtmp)	\
+	bpf__for_each_map(pos, obj, objtmp)		\
+		if (bpf_map__get_name(pos) &&		\
+			(strcmp("__bpf_stdout__",	\
+				bpf_map__get_name(pos)) == 0))
+
+int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
+{
+	struct bpf_map_priv *tmpl_priv = NULL;
+	struct bpf_object *obj, *tmp;
+	struct bpf_map *map;
+	int err;
+	bool need_init = false;
+
+	bpf__for_each_stdout_map(map, obj, tmp) {
+		struct bpf_map_priv *priv;
+
+		err = bpf_map__get_private(map, (void **)&priv);
+		if (err)
+			return -BPF_LOADER_ERRNO__INTERNAL;
+
+		/*
+		 * No need to check map type: type should have been
+		 * verified by kernel.
+		 */
+		if (!need_init && !priv)
+			need_init = !priv;
+		if (!tmpl_priv && priv)
+			tmpl_priv = priv;
+	}
+
+	if (!need_init)
+		return 0;
+
+	if (!tmpl_priv)
+		return 0;
+
+	bpf__for_each_stdout_map(map, obj, tmp) {
+		struct bpf_map_priv *priv;
+
+		err = bpf_map__get_private(map, (void **)&priv);
+		if (err)
+			return -BPF_LOADER_ERRNO__INTERNAL;
+		if (priv)
+			continue;
+
+		priv = bpf_map_priv__clone(tmpl_priv);
+		if (!priv)
+			return -ENOMEM;
+
+		err = bpf_map__set_private(map, priv, bpf_map_priv__clear);
+		if (err) {
+			bpf_map_priv__clear(map, priv);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 #define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
 #define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
 #define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
@@ -1590,3 +1706,11 @@ int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
 	bpf__strerror_end(buf, size);
 	return 0;
 }
+
+int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
+			       int err, char *buf, size_t size)
+{
+	bpf__strerror_head(err, buf, size);
+	bpf__strerror_end(buf, size);
+	return 0;
+}
diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h
index be4311944e3d..941e17275aa7 100644
--- a/tools/perf/util/bpf-loader.h
+++ b/tools/perf/util/bpf-loader.h
@@ -79,6 +79,11 @@ int bpf__strerror_config_obj(struct bpf_object *obj,
 			     size_t size);
 int bpf__apply_obj_config(void);
 int bpf__strerror_apply_obj_config(int err, char *buf, size_t size);
+
+int bpf__setup_stdout(struct perf_evlist *evlist);
+int bpf__strerror_setup_stdout(struct perf_evlist *evlist, int err,
+			       char *buf, size_t size);
+
 #else
 static inline struct bpf_object *
 bpf__prepare_load(const char *filename __maybe_unused,
@@ -125,6 +130,12 @@ bpf__apply_obj_config(void)
 }
 
 static inline int
+bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
+{
+	return 0;
+}
+
+static inline int
 __bpf_strerror(char *buf, size_t size)
 {
 	if (!size)
@@ -177,5 +188,13 @@ bpf__strerror_apply_obj_config(int err __maybe_unused,
 {
 	return __bpf_strerror(buf, size);
 }
+
+static inline int
+bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
+			   int err __maybe_unused, char *buf,
+			   size_t size)
+{
+	return __bpf_strerror(buf, size);
+}
 #endif
 #endif