author     Wang Nan <wangnan0@huawei.com>                2016-02-22 04:10:32 -0500
committer  Arnaldo Carvalho de Melo <acme@redhat.com>    2016-02-22 10:28:02 -0500
commit     8690a2a773703e4ad2a07a7f3912ea6b131307cc (patch)
tree       ce027362189bf92d61b7b1dfd072a26dfe055ac7 /tools/perf/util/bpf-loader.c
parent     a34f3be70cdf986850552e62b9f22d659bfbcef3 (diff)
perf record: Apply config to BPF objects before recording
bpf__apply_obj_config() is introduced as the core API to apply object
config options to all BPF objects. This patch also does the real work
of setting values for BPF_MAP_TYPE_PERF_ARRAY maps by inserting the
value stored in a map's private field into the BPF map.

This patch is required because we are not always able to set all BPF
config options during parsing. A further patch will set events created
by perf in BPF_MAP_TYPE_PERF_EVENT_ARRAY maps, which do not exist until
perf_evsel__open().

bpf_map_config_foreach_key() is introduced to iterate over each key
that needs to be configured. This function will be extended to support
more map types and different key settings.

In perf record, before recording starts, call bpf__apply_obj_config()
to turn on all BPF config options.

Test result:

  # cat ./test_bpf_map_1.c
  /************************ BEGIN **************************/
  #include <uapi/linux/bpf.h>
  #define SEC(NAME) __attribute__((section(NAME), used))

  struct bpf_map_def {
          unsigned int type;
          unsigned int key_size;
          unsigned int value_size;
          unsigned int max_entries;
  };

  static void *(*map_lookup_elem)(struct bpf_map_def *, void *) =
          (void *)BPF_FUNC_map_lookup_elem;
  static int (*trace_printk)(const char *fmt, int fmt_size, ...) =
          (void *)BPF_FUNC_trace_printk;

  struct bpf_map_def SEC("maps") channel = {
          .type = BPF_MAP_TYPE_ARRAY,
          .key_size = sizeof(int),
          .value_size = sizeof(int),
          .max_entries = 1,
  };

  SEC("func=sys_nanosleep")
  int func(void *ctx)
  {
          int key = 0;
          char fmt[] = "%d\n";
          int *pval = map_lookup_elem(&channel, &key);

          if (!pval)
                  return 0;
          trace_printk(fmt, sizeof(fmt), *pval);
          return 0;
  }

  char _license[] SEC("license") = "GPL";
  int _version SEC("version") = LINUX_VERSION_CODE;
  /************************* END ***************************/

  # echo "" > /sys/kernel/debug/tracing/trace
  # ./perf record -e './test_bpf_map_1.c/map:channel.value=11/' usleep 10
  [ perf record: Woken up 1 times to write data ]
  [ perf record: Captured and wrote 0.012 MB perf.data ]
  # cat /sys/kernel/debug/tracing/trace
  # tracer: nop
  #
  # entries-in-buffer/entries-written: 1/1   #P:8
  [SNIP]
  #              TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
  #                 | |       |   ||||       |         |
             usleep-18593 [007] d... 2394714.395539: : 11

  # ./perf record -e './test_bpf_map_1.c/map:channel.value=101/' usleep 10
  [ perf record: Woken up 1 times to write data ]
  [ perf record: Captured and wrote 0.012 MB perf.data ]
  # cat /sys/kernel/debug/tracing/trace
  # tracer: nop
  #
  # entries-in-buffer/entries-written: 1/1   #P:8
  [SNIP]
  #              TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
  #                 | |       |   ||||       |         |
             usleep-18593 [007] d... 2394714.395539: : 11
             usleep-19000 [006] d... 2394831.057840: : 101

Signed-off-by: Wang Nan <wangnan0@huawei.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Cody P Schafer <dev@codyps.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jeremie Galarneau <jeremie.galarneau@efficios.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kirill Smelkov <kirr@nexedi.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1456132275-98875-6-git-send-email-wangnan0@huawei.com
Signed-off-by: He Kuang <hekuang@huawei.com>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
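The diff below touches only tools/perf/util/bpf-loader.c, so the perf record
call site described above is not part of it. As a rough sketch of what that
caller looks like, using only the two functions this patch exports (the wrapper
name record__apply_bpf_config(), the header paths and the error wording are
illustrative assumptions, not taken from this patch):

  #include <stdio.h>              /* BUFSIZ */
  #include "util/bpf-loader.h"    /* bpf__apply_obj_config() and friends */
  #include "util/debug.h"         /* pr_err() */

  /* Hypothetical helper on the perf-record side: apply all
   * "map:NAME.value=..." options to the loaded BPF objects once,
   * before any sample is written to perf.data. */
  static int record__apply_bpf_config(void)
  {
          char errbuf[BUFSIZ];
          int err;

          err = bpf__apply_obj_config();
          if (err) {
                  bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
                  pr_err("ERROR: Apply config to BPF failed: %s\n", errbuf);
          }
          return err;
  }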
Diffstat (limited to 'tools/perf/util/bpf-loader.c')
-rw-r--r--   tools/perf/util/bpf-loader.c   184
1 file changed, 184 insertions(+), 0 deletions(-)
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index caeef9ec0124..dbbd17ca6d6f 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -7,6 +7,7 @@
 
 #include <linux/bpf.h>
 #include <bpf/libbpf.h>
+#include <bpf/bpf.h>
 #include <linux/err.h>
 #include <linux/string.h>
 #include "perf.h"
@@ -994,6 +995,182 @@ out:
 
 }
 
+typedef int (*map_config_func_t)(const char *name, int map_fd,
+				 struct bpf_map_def *pdef,
+				 struct bpf_map_op *op,
+				 void *pkey, void *arg);
+
+static int
+foreach_key_array_all(map_config_func_t func,
+		      void *arg, const char *name,
+		      int map_fd, struct bpf_map_def *pdef,
+		      struct bpf_map_op *op)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < pdef->max_entries; i++) {
+		err = func(name, map_fd, pdef, op, &i, arg);
+		if (err) {
+			pr_debug("ERROR: failed to insert value to %s[%u]\n",
+				 name, i);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static int
+bpf_map_config_foreach_key(struct bpf_map *map,
+			   map_config_func_t func,
+			   void *arg)
+{
+	int err, map_fd;
+	const char *name;
+	struct bpf_map_op *op;
+	struct bpf_map_def def;
+	struct bpf_map_priv *priv;
+
+	name = bpf_map__get_name(map);
+
+	err = bpf_map__get_private(map, (void **)&priv);
+	if (err) {
+		pr_debug("ERROR: failed to get private from map %s\n", name);
+		return -BPF_LOADER_ERRNO__INTERNAL;
+	}
+	if (!priv || list_empty(&priv->ops_list)) {
+		pr_debug("INFO: nothing to config for map %s\n", name);
+		return 0;
+	}
+
+	err = bpf_map__get_def(map, &def);
+	if (err) {
+		pr_debug("ERROR: failed to get definition from map %s\n", name);
+		return -BPF_LOADER_ERRNO__INTERNAL;
+	}
+	map_fd = bpf_map__get_fd(map);
+	if (map_fd < 0) {
+		pr_debug("ERROR: failed to get fd from map %s\n", name);
+		return map_fd;
+	}
+
+	list_for_each_entry(op, &priv->ops_list, list) {
+		switch (def.type) {
+		case BPF_MAP_TYPE_ARRAY:
+			switch (op->key_type) {
+			case BPF_MAP_KEY_ALL:
+				err = foreach_key_array_all(func, arg, name,
+							    map_fd, &def, op);
+				if (err)
+					return err;
+				break;
+			default:
+				pr_debug("ERROR: keytype for map '%s' invalid\n",
+					 name);
+				return -BPF_LOADER_ERRNO__INTERNAL;
+			}
+			break;
+		default:
+			pr_debug("ERROR: type of '%s' incorrect\n", name);
+			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
+		}
+	}
+
+	return 0;
+}
+
+static int
+apply_config_value_for_key(int map_fd, void *pkey,
+			   size_t val_size, u64 val)
+{
+	int err = 0;
+
+	switch (val_size) {
+	case 1: {
+		u8 _val = (u8)(val);
+		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
+		break;
+	}
+	case 2: {
+		u16 _val = (u16)(val);
+		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
+		break;
+	}
+	case 4: {
+		u32 _val = (u32)(val);
+		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
+		break;
+	}
+	case 8: {
+		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
+		break;
+	}
+	default:
+		pr_debug("ERROR: invalid value size\n");
+		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
+	}
+	if (err && errno)
+		err = -errno;
+	return err;
+}
+
+static int
+apply_obj_config_map_for_key(const char *name, int map_fd,
+			     struct bpf_map_def *pdef __maybe_unused,
+			     struct bpf_map_op *op,
+			     void *pkey, void *arg __maybe_unused)
+{
+	int err;
+
+	switch (op->op_type) {
+	case BPF_MAP_OP_SET_VALUE:
+		err = apply_config_value_for_key(map_fd, pkey,
+						 pdef->value_size,
+						 op->v.value);
+		break;
+	default:
+		pr_debug("ERROR: unknown value type for '%s'\n", name);
+		err = -BPF_LOADER_ERRNO__INTERNAL;
+	}
+	return err;
+}
+
+static int
+apply_obj_config_map(struct bpf_map *map)
+{
+	return bpf_map_config_foreach_key(map,
+					  apply_obj_config_map_for_key,
+					  NULL);
+}
+
+static int
+apply_obj_config_object(struct bpf_object *obj)
+{
+	struct bpf_map *map;
+	int err;
+
+	bpf_map__for_each(map, obj) {
+		err = apply_obj_config_map(map);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+int bpf__apply_obj_config(void)
+{
+	struct bpf_object *obj, *tmp;
+	int err;
+
+	bpf_object__for_each_safe(obj, tmp) {
+		err = apply_obj_config_object(obj);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 #define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
 #define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
 #define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
@@ -1148,3 +1325,10 @@ int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
 	bpf__strerror_end(buf, size);
 	return 0;
 }
+
+int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
+{
+	bpf__strerror_head(err, buf, size);
+	bpf__strerror_end(buf, size);
+	return 0;
+}
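
A note on the new dependency: the value-setting path above goes through
bpf_map_update_elem() from the newly included <bpf/bpf.h>, which is a thin
libbpf wrapper around the BPF_MAP_UPDATE_ELEM command of the bpf(2) syscall.
The sketch below shows an equivalent raw call, only to illustrate the kernel
interface involved; the helper name raw_map_update() is made up for this
example, it assumes kernel headers that define __NR_bpf, and perf itself uses
the libbpf wrapper rather than this:

  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>   /* __NR_bpf */
  #include <linux/bpf.h>     /* union bpf_attr, BPF_MAP_UPDATE_ELEM, BPF_ANY */

  /* Roughly what bpf_map_update_elem(fd, key, value, BPF_ANY) boils down
   * to: fill a union bpf_attr and issue the BPF_MAP_UPDATE_ELEM command
   * of the bpf(2) syscall. */
  static int raw_map_update(int map_fd, const void *key,
                            const void *value, __u64 flags)
  {
          union bpf_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.map_fd = map_fd;
          attr.key    = (__u64)(unsigned long)key;
          attr.value  = (__u64)(unsigned long)value;
          attr.flags  = flags;    /* e.g. BPF_ANY */

          return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
  }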