author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-06-23 14:13:11 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-06-25 15:39:07 -0400
commit    38b200d67636a30cb8dc1508137908e7a649b5c9 (patch)
tree      74f88d57a7f55075d4cd0b46690ff4b319dca642 /kernel/perf_counter.c
parent    194002b274e9169a04beb1b23dcc132159bb566c (diff)
perf_counter: Add PERF_EVENT_READ
Provide a read()-like event which can be used to log the
counter value at specific sites, such as child->parent
folding on exit.

In order to be useful, we log the counter's parent ID, not
the actual counter ID, since userspace can only relate parent
IDs to perf_counter_attr constructs.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
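
[Editor's illustration, not part of this commit: a userspace consumer
reading the perf mmap ring buffer would see PERF_EVENT_READ as a
variable-size record whose trailing u64s depend on attr.read_format.
A minimal decode sketch, assuming struct perf_event_header and the
PERF_FORMAT_* bits from the linux/perf_counter.h of this era; the
helper name is hypothetical.]

#include <stdio.h>
#include <linux/types.h>
#include <linux/perf_counter.h>

/* Sketch: decode one PERF_EVENT_READ record; the body layout mirrors
 * struct perf_read_event added by this patch (pid, tid, value, then
 * zero to three u64s selected by read_format). */
static void decode_read_event(const struct perf_event_header *hdr,
			      __u64 read_format)
{
	const __u32 *p32 = (const __u32 *)(hdr + 1);
	const __u64 *p64 = (const __u64 *)(p32 + 2);
	__u64 value = *p64++;

	printf("READ pid=%u tid=%u value=%llu\n", p32[0], p32[1],
	       (unsigned long long)value);

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("  time_enabled=%llu\n", (unsigned long long)*p64++);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("  time_running=%llu\n", (unsigned long long)*p64++);
	if (read_format & PERF_FORMAT_ID)
		/* parent ID: maps back to the perf_counter_attr the
		 * counter was created with, per the changelog above */
		printf("  id=%llu\n", (unsigned long long)*p64++);
}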
Diffstat (limited to 'kernel/perf_counter.c')
 -rw-r--r--  kernel/perf_counter.c | 72
 1 file changed, 68 insertions(+), 4 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 02994a719e27..a72c20e91953 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2624,6 +2624,66 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 }
 
 /*
+ * read event
+ */
+
+struct perf_read_event {
+	struct perf_event_header	header;
+
+	u32				pid;
+	u32				tid;
+	u64				value;
+	u64				format[3];
+};
+
+static void
+perf_counter_read_event(struct perf_counter *counter,
+			struct task_struct *task)
+{
+	struct perf_output_handle handle;
+	struct perf_read_event event = {
+		.header = {
+			.type = PERF_EVENT_READ,
+			.misc = 0,
+			.size = sizeof(event) - sizeof(event.format),
+		},
+		.pid = perf_counter_pid(counter, task),
+		.tid = perf_counter_tid(counter, task),
+		.value = atomic64_read(&counter->count),
+	};
+	int ret, i = 0;
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		event.header.size += sizeof(u64);
+		event.format[i++] = counter->total_time_enabled;
+	}
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		event.header.size += sizeof(u64);
+		event.format[i++] = counter->total_time_running;
+	}
+
+	if (counter->attr.read_format & PERF_FORMAT_ID) {
+		u64 id;
+
+		event.header.size += sizeof(u64);
+		if (counter->parent)
+			id = counter->parent->id;
+		else
+			id = counter->id;
+
+		event.format[i++] = id;
+	}
+
+	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
+	if (ret)
+		return;
+
+	perf_output_copy(&handle, &event, event.header.size);
+	perf_output_end(&handle);
+}
+
+/*
  * fork tracking
  */
 
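
[Editor's illustration, not from the commit: event.header.size starts
at the fixed portion — sizeof(event) minus the unused format[] tail —
and grows by one u64 per set read_format bit, so a consumer can derive
the expected record size with the mirrored arithmetic. A sketch, with
a hypothetical helper name:]

/* Sketch: expected size of a PERF_EVENT_READ record for a given
 * read_format, mirroring the kernel-side size computation above. */
static size_t perf_read_event_size(__u64 read_format)
{
	size_t size = sizeof(struct perf_event_header)
			+ 2 * sizeof(__u32)	/* pid, tid */
			+ sizeof(__u64);	/* value */

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(__u64);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(__u64);
	if (read_format & PERF_FORMAT_ID)
		size += sizeof(__u64);

	return size;
}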
@@ -3985,10 +4045,13 @@ static int inherit_group(struct perf_counter *parent_counter,
 }
 
 static void sync_child_counter(struct perf_counter *child_counter,
-			       struct perf_counter *parent_counter)
+			       struct task_struct *child)
 {
+	struct perf_counter *parent_counter = child_counter->parent;
 	u64 child_val;
 
+	perf_counter_read_event(child_counter, child);
+
 	child_val = atomic64_read(&child_counter->count);
 
 	/*
@@ -4017,7 +4080,8 @@ static void sync_child_counter(struct perf_counter *child_counter,
 
 static void
 __perf_counter_exit_task(struct perf_counter *child_counter,
-			 struct perf_counter_context *child_ctx)
+			 struct perf_counter_context *child_ctx,
+			 struct task_struct *child)
 {
 	struct perf_counter *parent_counter;
 
@@ -4031,7 +4095,7 @@ __perf_counter_exit_task(struct perf_counter *child_counter,
 	 * counters need to be zapped - but otherwise linger.
 	 */
 	if (parent_counter) {
-		sync_child_counter(child_counter, parent_counter);
+		sync_child_counter(child_counter, child);
 		free_counter(child_counter);
 	}
 }
@@ -4093,7 +4157,7 @@ void perf_counter_exit_task(struct task_struct *child)
 again:
 	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
 				 list_entry)
-		__perf_counter_exit_task(child_counter, child_ctx);
+		__perf_counter_exit_task(child_counter, child_ctx, child);
 
 	/*
 	 * If the last counter was a group counter, it will have appended all