author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-05 11:50:26 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-05-05 14:18:33 -0400
commit		2023b359214bbc5bad31571cf50d7fb83b535c0a (patch)
tree		991ca078cd79b883cd09da2b349055e136e13a0c /kernel/perf_counter.c
parent		22c1558e51c210787c6cf75d8905246fc91ec030 (diff)
perf_counter: inheritable sample counters
Redirect the output of inherited counters to the parent counter and add some sanity checks.
[ Impact: new perfcounter feature - inherited sampling counters ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090505155437.331556171@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
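
For context, a minimal userspace sketch of the intended usage follows. It assumes the perf_counter interface as it stood in the tip tree at the time: the sys_perf_counter_open() syscall (invoked here via __NR_perf_counter_open) and the <linux/perf_counter.h> header are era-specific assumptions rather than a stable ABI, and event selection is elided.

/*
 * Hypothetical sketch, not part of the patch: open a counter with
 * hw_event.inherit set, so that counters cloned into fork()ed children
 * feed this fd's output buffer, and exercise the new REFRESH check.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>         /* assumed header location */

int main(void)
{
        struct perf_counter_hw_event hw_event;
        int fd;

        memset(&hw_event, 0, sizeof(hw_event));
        /* event selection (type/config/sample period) elided for brevity */
        hw_event.inherit = 1;           /* clone the counter across fork() */

        fd = syscall(__NR_perf_counter_open, &hw_event,
                     0,                 /* pid: monitor the current task */
                     -1,                /* cpu: on any cpu */
                     -1,                /* group_fd: no counter group */
                     0);                /* flags */
        if (fd < 0) {
                perror("perf_counter_open");
                return 1;
        }

        /*
         * One of the sanity checks added below: REFRESH is refused on
         * inherited counters, so this ioctl should fail with EINVAL.
         */
        if (ioctl(fd, PERF_COUNTER_IOC_REFRESH, 1) < 0)
                perror("PERF_COUNTER_IOC_REFRESH");

        close(fd);
        return 0;
}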
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	32
1 file changed, 30 insertions(+), 2 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index c881afef997b..60e55f0b48f4 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -738,10 +738,18 @@ static void perf_counter_enable(struct perf_counter *counter)
         spin_unlock_irq(&ctx->lock);
 }
 
-static void perf_counter_refresh(struct perf_counter *counter, int refresh)
+static int perf_counter_refresh(struct perf_counter *counter, int refresh)
 {
+        /*
+         * not supported on inherited counters
+         */
+        if (counter->hw_event.inherit)
+                return -EINVAL;
+
         atomic_add(refresh, &counter->event_limit);
         perf_counter_enable(counter);
+
+        return 0;
 }
 
 /*
@@ -1307,7 +1315,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                 perf_counter_disable_family(counter);
                 break;
         case PERF_COUNTER_IOC_REFRESH:
-                perf_counter_refresh(counter, arg);
+                err = perf_counter_refresh(counter, arg);
                 break;
         case PERF_COUNTER_IOC_RESET:
                 perf_counter_reset(counter);
@@ -1814,6 +1822,12 @@ static int perf_output_begin(struct perf_output_handle *handle,
         struct perf_mmap_data *data;
         unsigned int offset, head;
 
+        /*
+         * For inherited counters we send all the output towards the parent.
+         */
+        if (counter->parent)
+                counter = counter->parent;
+
         rcu_read_lock();
         data = rcu_dereference(counter->data);
         if (!data)
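
As an aside, the redirection rule this hunk adds is easy to model in isolation. The following is a self-contained toy in plain C, with purely illustrative types rather than kernel code, showing that output for an inherited counter is emitted against the user-visible parent, whose mmap() buffer userspace actually reads.

/* Toy model of the single-hop redirect added to perf_output_begin();
 * struct counter here is hypothetical, not the kernel's perf_counter. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct counter {
        struct counter *parent;         /* NULL for the counter userspace opened */
        const char *name;
};

/* Mirrors the patch: inherited counters write to their parent. */
static struct counter *output_target(struct counter *c)
{
        if (c->parent)
                c = c->parent;
        return c;
}

int main(void)
{
        struct counter top = { .parent = NULL, .name = "parent" };
        struct counter child = { .parent = &top, .name = "child" };

        assert(output_target(&child) == &top);  /* child samples go upward */
        assert(output_target(&top) == &top);    /* parent output unchanged */
        printf("child output lands in: %s\n", output_target(&child)->name);
        return 0;
}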
@@ -1995,6 +2009,9 @@ static void perf_counter_output(struct perf_counter *counter,
         if (record_type & PERF_RECORD_ADDR)
                 perf_output_put(&handle, addr);
 
+        /*
+         * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
+         */
         if (record_type & PERF_RECORD_GROUP) {
                 struct perf_counter *leader, *sub;
                 u64 nr = counter->nr_siblings;
@@ -2281,6 +2298,11 @@ int perf_counter_overflow(struct perf_counter *counter,
         int events = atomic_read(&counter->event_limit);
         int ret = 0;
 
+        /*
+         * XXX event_limit might not quite work as expected on inherited
+         * counters
+         */
+
         counter->pending_kill = POLL_IN;
         if (events && atomic_dec_and_test(&counter->event_limit)) {
                 ret = 1;
@@ -2801,6 +2823,12 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 
         pmu = NULL;
 
+        /*
+         * we currently do not support PERF_RECORD_GROUP on inherited counters
+         */
+        if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
+                goto done;
+
         if (perf_event_raw(hw_event)) {
                 pmu = hw_perf_counter_init(counter);
                 goto done;
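
Design note: since this check runs right after pmu is cleared and before any of the pmu init paths, a hw_event that sets both inherit and PERF_RECORD_GROUP presumably never reaches hardware setup at all; creation fails up front instead of silently producing incomplete group data, consistent with the XXX comment in perf_counter_output() above.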