commit c66de4a5be7913247bd83d79168f8e4420c9cfbc (patch)
author:    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-05-05 11:50:22 -0400
committer: Ingo Molnar <mingo@elte.hu>              2009-05-05 14:18:30 -0400
tree:      c671172166d94be75ad9d6df1fdafa0c006c4609 /kernel
parent:    066d7dea32c9bffe6decc0abe465627656cdd84e (diff)
perf_counter: uncouple data_head updates from wakeups
Keep data_head up-to-date irrespective of notifications. This fixes
the case where you disable a counter and don't get a notification for
the last few pending events, and it also allows polling usage.
[ Impact: increase precision of perfcounter mmap-ed fields ]
Suggested-by: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20090505155436.925084300@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/perf_counter.c | 20
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 5f86a1156c94..ba5e921e1f36 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
@@ -1696,7 +1696,6 @@ struct perf_output_handle { | |||
1696 | struct perf_mmap_data *data; | 1696 | struct perf_mmap_data *data; |
1697 | unsigned int offset; | 1697 | unsigned int offset; |
1698 | unsigned int head; | 1698 | unsigned int head; |
1699 | int wakeup; | ||
1700 | int nmi; | 1699 | int nmi; |
1701 | int overflow; | 1700 | int overflow; |
1702 | int locked; | 1701 | int locked; |
@@ -1752,8 +1751,7 @@ static void perf_output_unlock(struct perf_output_handle *handle) | |||
1752 | struct perf_mmap_data *data = handle->data; | 1751 | struct perf_mmap_data *data = handle->data; |
1753 | int head, cpu; | 1752 | int head, cpu; |
1754 | 1753 | ||
1755 | if (handle->wakeup) | 1754 | data->done_head = data->head; |
1756 | data->wakeup_head = data->head; | ||
1757 | 1755 | ||
1758 | if (!handle->locked) | 1756 | if (!handle->locked) |
1759 | goto out; | 1757 | goto out; |
@@ -1764,13 +1762,11 @@ again: | |||
1764 | * before we publish the new head, matched by a rmb() in userspace when | 1762 | * before we publish the new head, matched by a rmb() in userspace when |
1765 | * reading this position. | 1763 | * reading this position. |
1766 | */ | 1764 | */ |
1767 | while ((head = atomic_xchg(&data->wakeup_head, 0))) { | 1765 | while ((head = atomic_xchg(&data->done_head, 0))) |
1768 | data->user_page->data_head = head; | 1766 | data->user_page->data_head = head; |
1769 | handle->wakeup = 1; | ||
1770 | } | ||
1771 | 1767 | ||
1772 | /* | 1768 | /* |
1773 | * NMI can happen here, which means we can miss a wakeup_head update. | 1769 | * NMI can happen here, which means we can miss a done_head update. |
1774 | */ | 1770 | */ |
1775 | 1771 | ||
1776 | cpu = atomic_xchg(&data->lock, 0); | 1772 | cpu = atomic_xchg(&data->lock, 0); |
@@ -1779,7 +1775,7 @@ again: | |||
1779 | /* | 1775 | /* |
1780 | * Therefore we have to validate we did not indeed do so. | 1776 | * Therefore we have to validate we did not indeed do so. |
1781 | */ | 1777 | */ |
1782 | if (unlikely(atomic_read(&data->wakeup_head))) { | 1778 | if (unlikely(atomic_read(&data->done_head))) { |
1783 | /* | 1779 | /* |
1784 | * Since we had it locked, we can lock it again. | 1780 | * Since we had it locked, we can lock it again. |
1785 | */ | 1781 | */ |
@@ -1789,7 +1785,7 @@ again: | |||
1789 | goto again; | 1785 | goto again; |
1790 | } | 1786 | } |
1791 | 1787 | ||
1792 | if (handle->wakeup) | 1788 | if (atomic_xchg(&data->wakeup, 0)) |
1793 | perf_output_wakeup(handle); | 1789 | perf_output_wakeup(handle); |
1794 | out: | 1790 | out: |
1795 | local_irq_restore(handle->flags); | 1791 | local_irq_restore(handle->flags); |
@@ -1824,7 +1820,9 @@ static int perf_output_begin(struct perf_output_handle *handle, | |||
1824 | 1820 | ||
1825 | handle->offset = offset; | 1821 | handle->offset = offset; |
1826 | handle->head = head; | 1822 | handle->head = head; |
1827 | handle->wakeup = (offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT); | 1823 | |
1824 | if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT)) | ||
1825 | atomic_set(&data->wakeup, 1); | ||
1828 | 1826 | ||
1829 | return 0; | 1827 | return 0; |
1830 | 1828 | ||
@@ -1882,7 +1880,7 @@ static void perf_output_end(struct perf_output_handle *handle) | |||
1882 | int events = atomic_inc_return(&data->events); | 1880 | int events = atomic_inc_return(&data->events); |
1883 | if (events >= wakeup_events) { | 1881 | if (events >= wakeup_events) { |
1884 | atomic_sub(wakeup_events, &data->events); | 1882 | atomic_sub(wakeup_events, &data->events); |
1885 | handle->wakeup = 1; | 1883 | atomic_set(&data->wakeup, 1); |
1886 | } | 1884 | } |
1887 | } | 1885 | } |
1888 | 1886 | ||