diff options
author | Kaixu Xia <xiakaixu@huawei.com> | 2015-08-06 03:02:32 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-08-10 01:50:05 -0400 |
commit | ffe8690c85b8426db7783064724d106702f1b1e8 (patch) | |
tree | baf9499372ba05151d168ecf4aff23591ab981ba /kernel/events | |
parent | f1d5ca434413b20cd3f8c18ff2b634b7782149a5 (diff) |
perf: add the necessary core perf APIs for accessing event counters in eBPF programs
This patch adds three core perf APIs:
- perf_event_attrs(): export the struct perf_event_attr from struct
perf_event;
- perf_event_get(): get the struct perf_event from the given fd;
- perf_event_read_local(): read the event counters active on the
current CPU;
These APIs are needed when accessing event counters in eBPF programs.
The perf_event_read_local() API comes from Peter, so I have added the
corresponding Signed-off-by.
Signed-off-by: Kaixu Xia <xiakaixu@huawei.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/events')
-rw-r--r-- | kernel/events/core.c | 78 |
1 files changed, 78 insertions, 0 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c index d3dae3419b99..e2c6a8886d4d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -3212,6 +3212,59 @@ static inline u64 perf_event_count(struct perf_event *event) | |||
3212 | return __perf_event_count(event); | 3212 | return __perf_event_count(event); |
3213 | } | 3213 | } |
3214 | 3214 | ||
3215 | /* | ||
3216 | * NMI-safe method to read a local event, that is an event that | ||
3217 | * is: | ||
3218 | * - either for the current task, or for this CPU | ||
3219 | * - does not have inherit set, for inherited task events | ||
3220 | * will not be local and we cannot read them atomically | ||
3221 | * - must not have a pmu::count method | ||
3222 | */ | ||
3223 | u64 perf_event_read_local(struct perf_event *event) | ||
3224 | { | ||
3225 | unsigned long flags; | ||
3226 | u64 val; | ||
3227 | |||
3228 | /* | ||
3229 | * Disabling interrupts avoids all counter scheduling (context | ||
3230 | * switches, timer based rotation and IPIs). | ||
3231 | */ | ||
3232 | local_irq_save(flags); | ||
3233 | |||
3234 | /* If this is a per-task event, it must be for current */ | ||
3235 | WARN_ON_ONCE((event->attach_state & PERF_ATTACH_TASK) && | ||
3236 | event->hw.target != current); | ||
3237 | |||
3238 | /* If this is a per-CPU event, it must be for this CPU */ | ||
3239 | WARN_ON_ONCE(!(event->attach_state & PERF_ATTACH_TASK) && | ||
3240 | event->cpu != smp_processor_id()); | ||
3241 | |||
3242 | /* | ||
3243 | * It must not be an event with inherit set, we cannot read | ||
3244 | * all child counters from atomic context. | ||
3245 | */ | ||
3246 | WARN_ON_ONCE(event->attr.inherit); | ||
3247 | |||
3248 | /* | ||
3249 | * It must not have a pmu::count method, those are not | ||
3250 | * NMI safe. | ||
3251 | */ | ||
3252 | WARN_ON_ONCE(event->pmu->count); | ||
3253 | |||
3254 | /* | ||
3255 | * If the event is currently on this CPU, its either a per-task event, | ||
3256 | * or local to this CPU. Furthermore it means its ACTIVE (otherwise | ||
3257 | * oncpu == -1). | ||
3258 | */ | ||
3259 | if (event->oncpu == smp_processor_id()) | ||
3260 | event->pmu->read(event); | ||
3261 | |||
3262 | val = local64_read(&event->count); | ||
3263 | local_irq_restore(flags); | ||
3264 | |||
3265 | return val; | ||
3266 | } | ||
3267 | |||
3215 | static u64 perf_event_read(struct perf_event *event) | 3268 | static u64 perf_event_read(struct perf_event *event) |
3216 | { | 3269 | { |
3217 | /* | 3270 | /* |
@@ -8574,6 +8627,31 @@ void perf_event_delayed_put(struct task_struct *task) | |||
8574 | WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); | 8627 | WARN_ON_ONCE(task->perf_event_ctxp[ctxn]); |
8575 | } | 8628 | } |
8576 | 8629 | ||
8630 | struct perf_event *perf_event_get(unsigned int fd) | ||
8631 | { | ||
8632 | int err; | ||
8633 | struct fd f; | ||
8634 | struct perf_event *event; | ||
8635 | |||
8636 | err = perf_fget_light(fd, &f); | ||
8637 | if (err) | ||
8638 | return ERR_PTR(err); | ||
8639 | |||
8640 | event = f.file->private_data; | ||
8641 | atomic_long_inc(&event->refcount); | ||
8642 | fdput(f); | ||
8643 | |||
8644 | return event; | ||
8645 | } | ||
8646 | |||
8647 | const struct perf_event_attr *perf_event_attrs(struct perf_event *event) | ||
8648 | { | ||
8649 | if (!event) | ||
8650 | return ERR_PTR(-EINVAL); | ||
8651 | |||
8652 | return &event->attr; | ||
8653 | } | ||
8654 | |||
8577 | /* | 8655 | /* |
8578 | * inherit a event from parent task to child task: | 8656 | * inherit a event from parent task to child task: |
8579 | */ | 8657 | */ |