diff options
author | Kaixu Xia <xiakaixu@huawei.com> | 2015-08-06 03:02:35 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-08-10 01:50:06 -0400 |
commit | 35578d7984003097af2b1e34502bc943d40c1804 (patch) | |
tree | b2eca5ddc9446e771dd5a9e1629b12f98b9f2bf0 /kernel/trace | |
parent | ea317b267e9d03a8241893aa176fba7661d07579 (diff) |
bpf: Implement function bpf_perf_event_read() that gets the selected hardware PMU counter
According to the perf_event_map_fd and index, the function
bpf_perf_event_read() can convert the corresponding map
value to the pointer to struct perf_event and return the
hardware PMU counter value.
Signed-off-by: Kaixu Xia <xiakaixu@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/bpf_trace.c | 31 |
1 files changed, 31 insertions, 0 deletions
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 88a041adee90..ef9936df1b04 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c | |||
@@ -158,6 +158,35 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void) | |||
158 | return &bpf_trace_printk_proto; | 158 | return &bpf_trace_printk_proto; |
159 | } | 159 | } |
160 | 160 | ||
161 | static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5) | ||
162 | { | ||
163 | struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; | ||
164 | struct bpf_array *array = container_of(map, struct bpf_array, map); | ||
165 | struct perf_event *event; | ||
166 | |||
167 | if (unlikely(index >= array->map.max_entries)) | ||
168 | return -E2BIG; | ||
169 | |||
170 | event = (struct perf_event *)array->ptrs[index]; | ||
171 | if (!event) | ||
172 | return -ENOENT; | ||
173 | |||
174 | /* | ||
175 | * we don't know if the function is run successfully by the | ||
176 | * return value. It can be judged in other places, such as | ||
177 | * eBPF programs. | ||
178 | */ | ||
179 | return perf_event_read_local(event); | ||
180 | } | ||
181 | |||
/*
 * Helper prototype registered with the BPF verifier for
 * BPF_FUNC_perf_event_read.  Not GPL-restricted; returns a plain
 * integer (the counter value or a negative errno folded into u64).
 */
const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,	/* the perf event array map */
	.arg2_type	= ARG_ANYTHING,		/* index into the map */
};
189 | |||
161 | static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id) | 190 | static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id) |
162 | { | 191 | { |
163 | switch (func_id) { | 192 | switch (func_id) { |
@@ -183,6 +212,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func | |||
183 | return bpf_get_trace_printk_proto(); | 212 | return bpf_get_trace_printk_proto(); |
184 | case BPF_FUNC_get_smp_processor_id: | 213 | case BPF_FUNC_get_smp_processor_id: |
185 | return &bpf_get_smp_processor_id_proto; | 214 | return &bpf_get_smp_processor_id_proto; |
215 | case BPF_FUNC_perf_event_read: | ||
216 | return &bpf_perf_event_read_proto; | ||
186 | default: | 217 | default: |
187 | return NULL; | 218 | return NULL; |
188 | } | 219 | } |