Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--   kernel/perf_counter.c   22
1 files changed, 15 insertions, 7 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index c05e10354bc9..8c8eaf0625f9 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1596,7 +1596,11 @@ void perf_counter_wakeup(struct perf_counter *counter)
 	rcu_read_unlock();
 
 	wake_up_all(&counter->waitq);
-	kill_fasync(&counter->fasync, SIGIO, POLL_IN);
+
+	if (counter->pending_kill) {
+		kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
+		counter->pending_kill = 0;
+	}
 }
 
 /*
@@ -1727,6 +1731,7 @@ struct perf_output_handle {
 	unsigned int head;
 	int wakeup;
 	int nmi;
+	int overflow;
 };
 
 static inline void __perf_output_wakeup(struct perf_output_handle *handle)
@@ -1741,7 +1746,7 @@ static inline void __perf_output_wakeup(struct perf_output_handle *handle)
 
 static int perf_output_begin(struct perf_output_handle *handle,
 			     struct perf_counter *counter, unsigned int size,
-			     int nmi)
+			     int nmi, int overflow)
 {
 	struct perf_mmap_data *data;
 	unsigned int offset, head;
@@ -1751,8 +1756,9 @@ static int perf_output_begin(struct perf_output_handle *handle,
 	if (!data)
 		goto out;
 
 	handle->counter = counter;
 	handle->nmi = nmi;
+	handle->overflow = overflow;
 
 	if (!data->nr_pages)
 		goto fail;
@@ -1816,7 +1822,7 @@ static void perf_output_end(struct perf_output_handle *handle)
 {
 	int wakeup_events = handle->counter->hw_event.wakeup_events;
 
-	if (wakeup_events) {
+	if (handle->overflow && wakeup_events) {
 		int events = atomic_inc_return(&handle->data->events);
 		if (events >= wakeup_events) {
 			atomic_sub(wakeup_events, &handle->data->events);
@@ -1891,7 +1897,7 @@ static void perf_counter_output(struct perf_counter *counter,
 		header.size += sizeof(u64);
 	}
 
-	ret = perf_output_begin(&handle, counter, header.size, nmi);
+	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
 	if (ret)
 		return;
 
@@ -1955,7 +1961,7 @@ static void perf_counter_mmap_output(struct perf_counter *counter,
 {
 	struct perf_output_handle handle;
 	int size = mmap_event->event.header.size;
-	int ret = perf_output_begin(&handle, counter, size, 0);
+	int ret = perf_output_begin(&handle, counter, size, 0, 0);
 
 	if (ret)
 		return;
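
The two call sites above show the net effect of the new overflow argument: perf_counter_output() passes 1, perf_counter_mmap_output() passes 0, so only genuine overflow records advance the wakeup_events counter in perf_output_end(), and mmap records no longer trigger the periodic wakeup. The following is a minimal user-space model of that accounting, a sketch only and not kernel code; the function and variable names are illustrative.

#include <stdio.h>

static int events;	/* stands in for the atomic data->events counter */

/* Models the gate added to perf_output_end(): only records flagged as
 * overflow are counted, and every wakeup_events-th one requests a wakeup. */
static int output_end(int overflow, int wakeup_events)
{
	if (overflow && wakeup_events) {
		if (++events >= wakeup_events) {
			events -= wakeup_events;
			return 1;	/* would wake poll()ers / raise SIGIO */
		}
	}
	return 0;
}

int main(void)
{
	int i;

	for (i = 1; i <= 8; i++) {
		/* overflow records count toward the threshold ... */
		if (output_end(1, 4))
			printf("wakeup after overflow record %d\n", i);
		/* ... interleaved mmap records (overflow=0) never do */
		output_end(0, 4);
	}
	return 0;
}

With wakeup_events set to 4, the model reports a wakeup after the 4th and 8th overflow records, regardless of how many mmap records are interleaved.
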
@@ -2084,8 +2090,10 @@ int perf_counter_overflow(struct perf_counter *counter,
 	int events = atomic_read(&counter->event_limit);
 	int ret = 0;
 
+	counter->pending_kill = POLL_IN;
 	if (events && atomic_dec_and_test(&counter->event_limit)) {
 		ret = 1;
+		counter->pending_kill = POLL_HUP;
 		if (nmi) {
 			counter->pending_disable = 1;
 			perf_pending_queue(&counter->pending,
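
Because perf_counter_wakeup() now forwards counter->pending_kill to kill_fasync(), user space can tell an ordinary data wakeup (POLL_IN) apart from the counter hitting its event_limit and being disabled (POLL_HUP). The sketch below is not part of the patch; it assumes a perf counter file descriptor fd obtained elsewhere (the counter setup is omitted) and uses the Linux-specific F_SETSIG fcntl, which makes the band passed to kill_fasync() visible as si_code in the handler.

#define _GNU_SOURCE		/* for F_SETSIG */
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void counter_signal(int sig, siginfo_t *si, void *uctx)
{
	static const char hup[] = "counter hit its event limit\n";
	static const char in[]  = "new records in the mmap buffer\n";

	(void)sig; (void)uctx;
	if (si->si_code == POLL_HUP)		/* pending_kill was POLL_HUP */
		write(2, hup, sizeof(hup) - 1);
	else if (si->si_code == POLL_IN)	/* pending_kill was POLL_IN */
		write(2, in, sizeof(in) - 1);
}

/* Request async notification on a counter fd; returns 0 on success. */
static int setup_async(int fd)
{
	struct sigaction sa;

	sa.sa_sigaction = counter_signal;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGRTMIN + 1, &sa, NULL) < 0)
		return -1;
	if (fcntl(fd, F_SETOWN, getpid()) < 0)
		return -1;
	if (fcntl(fd, F_SETSIG, SIGRTMIN + 1) < 0)	/* deliver si_code/si_fd */
		return -1;
	return fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC) < 0 ? -1 : 0;
}

Whether the handler also needs to drain the mmap ring before returning depends on the consumer; the sketch only shows how the two bands are distinguished.
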