author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-04-06 05:45:09 -0400
committer Ingo Molnar <mingo@elte.hu>                2009-04-07 04:48:59 -0400
commit    4c9e25428ff46b968a30f1dfafdba550cb6e4141
tree      273e74f4f99dc626ddb960f0cbbe9b64d47bbfe9
parent    0c593b3411341e3a05a61f5527df36ab02bd11e8
perf_counter: change event definition
Currently the definition of an event is slightly ambiguous. We have
wakeup events, for poll() and SIGIO, which are either generated when a
record crosses a page boundary (hw_event.wakeup_events == 0), or every
wakeup_events new records.

Now a record can be either a counter overflow record, or a number of
different things, like the mmap PROT_EXEC region notifications.

Then there is the PERF_COUNTER_IOC_REFRESH event limit, which only
considers counter overflows.

This patch changes the wakeup_events and SIGIO notification to only
consider overflow events. Furthermore it changes the SIGIO notification
to report POLL_HUP when the event limit is reached and the counter will
be disabled.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.266679874@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
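[Editor's note: for illustration only, not part of this patch, a minimal
user-space sketch of how a monitor might consume the new semantics. The
counter setup via sys_perf_counter_open() is elided; counter_fd is a
hypothetical, already-open counter fd with hw_event.wakeup_events set.
F_SETSIG is used so the kernel passes the poll reason in siginfo, letting
the handler tell POLL_IN (overflow records ready) from POLL_HUP (event
limit reached, counter being disabled):]

	#define _GNU_SOURCE		/* for F_SETSIG */
	#include <fcntl.h>
	#include <signal.h>
	#include <unistd.h>

	static void counter_sig(int sig, siginfo_t *info, void *uc)
	{
		if (info->si_code == POLL_HUP) {
			/* event limit hit; the kernel is disabling the counter */
		} else if (info->si_code == POLL_IN) {
			/* wakeup_events overflow records are ready in the mmap buffer */
		}
	}

	/* counter_fd is a hypothetical fd from sys_perf_counter_open() */
	static void setup_async(int counter_fd)
	{
		struct sigaction sa = { 0 };

		sa.sa_sigaction = counter_sig;
		sa.sa_flags = SA_SIGINFO;
		sigaction(SIGIO, &sa, NULL);

		fcntl(counter_fd, F_SETOWN, getpid());
		fcntl(counter_fd, F_SETSIG, SIGIO);
		fcntl(counter_fd, F_SETFL, fcntl(counter_fd, F_GETFL) | O_ASYNC);
	}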
 include/linux/perf_counter.h |  1 +
 kernel/perf_counter.c        | 22 +++++++++++++++-------
 2 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 81220188d058..0f5a4005048f 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -439,6 +439,7 @@ struct perf_counter {
 
 	/* delayed work for NMIs and such */
 	int				pending_wakeup;
+	int				pending_kill;
 	int				pending_disable;
 	struct perf_pending_entry	pending;
 
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index c05e10354bc9..8c8eaf0625f9 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1596,7 +1596,11 @@ void perf_counter_wakeup(struct perf_counter *counter)
 	rcu_read_unlock();
 
 	wake_up_all(&counter->waitq);
-	kill_fasync(&counter->fasync, SIGIO, POLL_IN);
+
+	if (counter->pending_kill) {
+		kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
+		counter->pending_kill = 0;
+	}
 }
 
 /*
@@ -1727,6 +1731,7 @@ struct perf_output_handle {
 	unsigned int		head;
 	int			wakeup;
 	int			nmi;
+	int			overflow;
 };
 
 static inline void __perf_output_wakeup(struct perf_output_handle *handle)
@@ -1741,7 +1746,7 @@ static inline void __perf_output_wakeup(struct perf_output_handle *handle)
 
 static int perf_output_begin(struct perf_output_handle *handle,
 			     struct perf_counter *counter, unsigned int size,
-			     int nmi)
+			     int nmi, int overflow)
 {
 	struct perf_mmap_data *data;
 	unsigned int offset, head;
@@ -1751,8 +1756,9 @@ static int perf_output_begin(struct perf_output_handle *handle,
 	if (!data)
 		goto out;
 
 	handle->counter = counter;
 	handle->nmi = nmi;
+	handle->overflow = overflow;
 
 	if (!data->nr_pages)
 		goto fail;
@@ -1816,7 +1822,7 @@ static void perf_output_end(struct perf_output_handle *handle)
 {
 	int wakeup_events = handle->counter->hw_event.wakeup_events;
 
-	if (wakeup_events) {
+	if (handle->overflow && wakeup_events) {
 		int events = atomic_inc_return(&handle->data->events);
 		if (events >= wakeup_events) {
 			atomic_sub(wakeup_events, &handle->data->events);
@@ -1891,7 +1897,7 @@ static void perf_counter_output(struct perf_counter *counter,
 		header.size += sizeof(u64);
 	}
 
-	ret = perf_output_begin(&handle, counter, header.size, nmi);
+	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
 	if (ret)
 		return;
 
@@ -1955,7 +1961,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter,
 {
 	struct perf_output_handle handle;
 	int size = mmap_event->event.header.size;
-	int ret = perf_output_begin(&handle, counter, size, 0);
+	int ret = perf_output_begin(&handle, counter, size, 0, 0);
 
 	if (ret)
 		return;
@@ -2084,8 +2090,10 @@ int perf_counter_overflow(struct perf_counter *counter,
 	int events = atomic_read(&counter->event_limit);
 	int ret = 0;
 
+	counter->pending_kill = POLL_IN;
 	if (events && atomic_dec_and_test(&counter->event_limit)) {
 		ret = 1;
+		counter->pending_kill = POLL_HUP;
 		if (nmi) {
 			counter->pending_disable = 1;
 			perf_pending_queue(&counter->pending,
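
[Editor's note: a usage sketch of the event-limit side, under the
assumption that PERF_COUNTER_IOC_REFRESH in this era re-arms a counter
for a further number of overflows; refresh_counter() and counter_fd are
hypothetical names, not part of this patch:]

	#include <sys/ioctl.h>
	#include <linux/perf_counter.h>

	/*
	 * Assumption: allow n more overflow events. Once they are consumed,
	 * perf_counter_overflow() returns 1, the counter is disabled, and
	 * (with this patch) the fasync owner sees SIGIO with POLL_HUP.
	 */
	static int refresh_counter(int counter_fd, int n)
	{
		return ioctl(counter_fd, PERF_COUNTER_IOC_REFRESH, n);
	}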