author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-05 09:05:43 -0400
committer	Ingo Molnar <mingo@elte.hu>		2009-06-05 12:07:47 -0400
commit		689802b2d0536e72281dc959ab9cb34fb3c304cf (patch)
tree		9be866e918391e86eaecd8a05124f86693504d3f /kernel
parent		ac4bcf889469ffbca88f234d3184452886a47905 (diff)
perf_counter: Add PERF_SAMPLE_PERIOD
In order to allow easy tracking of the period, also provide a means of
adding it to the sample data.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 kernel/perf_counter.c | 10 ++++++++++
 1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index e75b91a76a58..f8390668c391 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2404,6 +2404,11 @@ static void perf_counter_output(struct perf_counter *counter,
 		cpu_entry.cpu = raw_smp_processor_id();
 	}
 
+	if (sample_type & PERF_SAMPLE_PERIOD) {
+		header.type |= PERF_SAMPLE_PERIOD;
+		header.size += sizeof(u64);
+	}
+
 	if (sample_type & PERF_SAMPLE_GROUP) {
 		header.type |= PERF_SAMPLE_GROUP;
 		header.size += sizeof(u64) +
@@ -2445,6 +2450,9 @@ static void perf_counter_output(struct perf_counter *counter,
 	if (sample_type & PERF_SAMPLE_CPU)
 		perf_output_put(&handle, cpu_entry);
 
+	if (sample_type & PERF_SAMPLE_PERIOD)
+		perf_output_put(&handle, counter->hw.sample_period);
+
 	/*
 	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
 	 */
@@ -2835,6 +2843,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period)
 	struct {
 		struct perf_event_header	header;
 		u64				time;
+		u64				id;
 		u64				period;
 	} freq_event = {
 		.header = {
@@ -2843,6 +2852,7 @@ static void perf_log_period(struct perf_counter *counter, u64 period)
 			.size = sizeof(freq_event),
 		},
 		.time = sched_clock(),
+		.id = counter->id,
 		.period = period,
 	};
 
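
For context, a minimal userspace sketch (not part of this patch) of how a
consumer might request the period in its sample data and where the extra
u64 lands in each sample record. It is written against the later perf_event
ABI (perf_event_open(), struct perf_event_attr, PERF_RECORD_SAMPLE); at the
time of this patch the interface was still named perf_counter, so the
attribute and syscall names below are assumptions about the eventual API
rather than what this tree exposes.

/*
 * Hedged sketch: request PERF_SAMPLE_PERIOD on a frequency-driven
 * counter.  Uses the later perf_event names, not the perf_counter
 * names in this patch.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.size		= sizeof(attr),
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.freq		= 1,
		.sample_freq	= 1000,		/* kernel adjusts the period */
		.sample_type	= PERF_SAMPLE_IP | PERF_SAMPLE_PERIOD,
	};
	int fd = perf_event_open(&attr, 0, -1, -1, 0);

	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/*
	 * With PERF_SAMPLE_PERIOD set, each PERF_RECORD_SAMPLE in the
	 * mmap()ed ring buffer carries one extra u64 holding the period
	 * that was in effect when the sample was taken, appended after
	 * the fields selected by the lower sample_type bits (here: ip).
	 */
	close(fd);
	return 0;
}

The effect of the new sample bit is visible in the record layout: when the
kernel is adjusting the period to hit sample_freq, every sample carries the
period actually in effect, presumably so that per-sample weights can be
derived directly instead of being reconstructed from the separate frequency
events that perf_log_period() emits.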