Diffstat (limited to 'kernel')

 kernel/perf_counter.c | 51 +++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 41 insertions(+), 10 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 19990d1f0215..c05e10354bc9 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -744,6 +744,12 @@ static void perf_counter_enable(struct perf_counter *counter)
 	spin_unlock_irq(&ctx->lock);
 }
 
+static void perf_counter_refresh(struct perf_counter *counter, int refresh)
+{
+	atomic_add(refresh, &counter->event_limit);
+	perf_counter_enable(counter);
+}
+
 /*
  * Enable a counter and all its children.
  */
@@ -1311,6 +1317,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case PERF_COUNTER_IOC_DISABLE:
 		perf_counter_disable_family(counter);
 		break;
+	case PERF_COUNTER_IOC_REFRESH:
+		perf_counter_refresh(counter, arg);
+		break;
 	default:
 		err = -ENOTTY;
 	}
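
The new ioctl folds two operations into one call: perf_counter_refresh() adds arg overflows to the counter's event_limit and then enables the counter. A minimal userspace sketch, hedged: it assumes a counter file descriptor from the perf counter syscall and that the ioctl constant is visible via <linux/perf_counter.h>; "fd" and "n" are illustrative names, not part of this patch.

	/* Grant the counter n more overflows and (re-)enable it; the
	 * kernel side runs perf_counter_refresh(counter, n). */
	ioctl(fd, PERF_COUNTER_IOC_REFRESH, n);
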
@@ -1590,14 +1599,6 @@ void perf_counter_wakeup(struct perf_counter *counter)
 	kill_fasync(&counter->fasync, SIGIO, POLL_IN);
 }
 
-static void perf_pending_wakeup(struct perf_pending_entry *entry)
-{
-	struct perf_counter *counter = container_of(entry,
-			struct perf_counter, pending);
-
-	perf_counter_wakeup(counter);
-}
-
 /*
  * Pending wakeups
  *
@@ -1607,6 +1608,22 @@ static void perf_pending_wakeup(struct perf_pending_entry *entry)
  * single linked list and use cmpxchg() to add entries lockless.
  */
 
+static void perf_pending_counter(struct perf_pending_entry *entry)
+{
+	struct perf_counter *counter = container_of(entry,
+			struct perf_counter, pending);
+
+	if (counter->pending_disable) {
+		counter->pending_disable = 0;
+		perf_counter_disable(counter);
+	}
+
+	if (counter->pending_wakeup) {
+		counter->pending_wakeup = 0;
+		perf_counter_wakeup(counter);
+	}
+}
+
 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
 
 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
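
perf_pending_counter() is now the single callback queued from NMI context; the work it performs is selected by the pending_disable and pending_wakeup flags, which the producer sets before queueing. The queue itself is the lockless singly-linked list the comment above describes, pushed with cmpxchg() and terminated by the PENDING_TAIL sentinel. Below is a rough userspace analogue of that push, as a compilable sketch; the names pending_entry/pending_queue are illustrative, and the kernel additionally keeps per-CPU list heads and raises a self-interrupt to drain them:

	#include <stddef.h>
	#include <stdio.h>

	struct pending_entry {
		struct pending_entry *next;
	};

	/* Sentinel: marks the list tail and flags an entry as queued. */
	#define PENDING_TAIL ((struct pending_entry *)-1UL)

	static struct pending_entry *pending_head = PENDING_TAIL;

	static void pending_queue(struct pending_entry *entry)
	{
		/* Claim the entry by swapping NULL -> PENDING_TAIL; a
		 * non-NULL next means it is already on some list. */
		if (!__sync_bool_compare_and_swap(&entry->next,
						  NULL, PENDING_TAIL))
			return;

		/* Push onto the list head, retrying if another CPU races us. */
		do {
			entry->next = pending_head;
		} while (!__sync_bool_compare_and_swap(&pending_head,
						       entry->next, entry));
	}

	int main(void)
	{
		struct pending_entry a = { NULL }, b = { NULL };

		pending_queue(&a);
		pending_queue(&b);
		pending_queue(&a);	/* no-op: already queued */

		for (struct pending_entry *p = pending_head;
		     p != PENDING_TAIL; p = p->next)
			printf("entry %p\n", (void *)p);
		return 0;
	}

The claim step is what makes queueing the same entry twice (say, a wakeup and a disable raised back to back) harmless: the entry goes on the list once, and the merged callback handles both flags.
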
@@ -1715,8 +1732,9 @@ struct perf_output_handle {
 static inline void __perf_output_wakeup(struct perf_output_handle *handle)
 {
 	if (handle->nmi) {
+		handle->counter->pending_wakeup = 1;
 		perf_pending_queue(&handle->counter->pending,
-				   perf_pending_wakeup);
+				   perf_pending_counter);
 	} else
 		perf_counter_wakeup(handle->counter);
 }
@@ -2063,8 +2081,21 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 int perf_counter_overflow(struct perf_counter *counter,
 			  int nmi, struct pt_regs *regs)
 {
+	int events = atomic_read(&counter->event_limit);
+	int ret = 0;
+
+	if (events && atomic_dec_and_test(&counter->event_limit)) {
+		ret = 1;
+		if (nmi) {
+			counter->pending_disable = 1;
+			perf_pending_queue(&counter->pending,
+					   perf_pending_counter);
+		} else
+			perf_counter_disable(counter);
+	}
+
 	perf_counter_output(counter, nmi, regs);
-	return 0;
+	return ret;
 }
 
 /*
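
Taken together, this turns event_limit into a self-disarming trigger: each overflow decrements it, and the overflow that brings it to zero disables the counter (deferred through the pending queue when running in NMI context) and makes perf_counter_overflow() return 1. With a refresh argument of 1, each PERF_COUNTER_IOC_REFRESH therefore permits exactly one further overflow. A hedged sketch of the resulting userspace pattern, re-arming from the SIGIO that perf_counter_wakeup() raises via kill_fasync(); the counter setup (sample period, F_SETOWN/O_ASYNC on the fd, installing the handler) is assumed rather than shown, and counter_fd is a hypothetical global:

	#include <signal.h>
	#include <sys/ioctl.h>
	#include <linux/perf_counter.h>	/* assumed to carry the ioctl */

	static int counter_fd;	/* hypothetical: opened and armed elsewhere */

	static void on_sigio(int sig)
	{
		(void)sig;
		/* The counter disabled itself when event_limit hit zero;
		 * grant one more overflow and enable it again. */
		ioctl(counter_fd, PERF_COUNTER_IOC_REFRESH, 1);
	}
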