author    Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-04-06 05:45:07 -0400
committer Ingo Molnar <mingo@elte.hu>	2009-04-07 04:48:58 -0400
commit    79f146415623fe74f39af67c0f6adc208939a410 (patch)
tree      2b8b3ac045b21cce1169b55bd298a93dba4a19b3 /kernel/perf_counter.c
parent    339f7c90b8a2f3aa2dd4267e79f797999e8a3c59 (diff)
perf_counter: counter overflow limit

Provide means to auto-disable the counter after 'n' overflow events.

Create the counter with hw_event.disabled = 1, and then issue an
ioctl(fd, PERF_COUNTER_IOC_REFRESH, n); to set the limit and enable
the counter.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.083139737@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
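[Editor's sketch, not part of the patch: a minimal userspace illustration of the usage the changelog describes -- open the counter disabled, then arm it with the REFRESH ioctl. The raw syscall invocation, the PERF_COUNT_CPU_CYCLES constant, and the irq_period field name follow the 2009-era perf_counter ABI as an assumption (the interface was later renamed perf_event), and the period and limit values are arbitrary.]

	/* Illustrative sketch only -- assumes the 2009-era perf_counter ABI. */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <linux/perf_counter.h>

	int main(void)
	{
		struct perf_counter_hw_event hw_event = {
			.type		= PERF_COUNT_CPU_CYCLES, /* era-specific constant */
			.irq_period	= 100000,	/* sample period; field name assumed */
			.disabled	= 1,		/* start disabled, per the changelog */
		};
		int fd;

		/* no glibc wrapper existed; the raw syscall is assumed wired up */
		fd = syscall(__NR_perf_counter_open, &hw_event,
			     0 /* pid: self */, -1 /* cpu: any */,
			     -1 /* group_fd: none */, 0 /* flags */);
		if (fd < 0) {
			perror("perf_counter_open");
			return 1;
		}

		/* set the overflow limit to 10 and enable the counter;
		 * after 10 overflows it disables itself again */
		ioctl(fd, PERF_COUNTER_IOC_REFRESH, 10);

		/* ... run the workload to be sampled ... */

		close(fd);
		return 0;
	}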
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c | 51 +++++++++++++++++++++++++++++++++++++++----------
1 file changed, 41 insertions(+), 10 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 19990d1f0215..c05e10354bc9 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -744,6 +744,12 @@ static void perf_counter_enable(struct perf_counter *counter)
 	spin_unlock_irq(&ctx->lock);
 }
 
+static void perf_counter_refresh(struct perf_counter *counter, int refresh)
+{
+	atomic_add(refresh, &counter->event_limit);
+	perf_counter_enable(counter);
+}
+
 /*
  * Enable a counter and all its children.
  */
@@ -1311,6 +1317,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case PERF_COUNTER_IOC_DISABLE:
 		perf_counter_disable_family(counter);
 		break;
+	case PERF_COUNTER_IOC_REFRESH:
+		perf_counter_refresh(counter, arg);
+		break;
 	default:
 		err = -ENOTTY;
 	}
@@ -1590,14 +1599,6 @@ void perf_counter_wakeup(struct perf_counter *counter)
 	kill_fasync(&counter->fasync, SIGIO, POLL_IN);
 }
 
-static void perf_pending_wakeup(struct perf_pending_entry *entry)
-{
-	struct perf_counter *counter = container_of(entry,
-			struct perf_counter, pending);
-
-	perf_counter_wakeup(counter);
-}
-
 /*
  * Pending wakeups
  *
@@ -1607,6 +1608,22 @@ static void perf_pending_wakeup(struct perf_pending_entry *entry)
  * single linked list and use cmpxchg() to add entries lockless.
  */
 
+static void perf_pending_counter(struct perf_pending_entry *entry)
+{
+	struct perf_counter *counter = container_of(entry,
+			struct perf_counter, pending);
+
+	if (counter->pending_disable) {
+		counter->pending_disable = 0;
+		perf_counter_disable(counter);
+	}
+
+	if (counter->pending_wakeup) {
+		counter->pending_wakeup = 0;
+		perf_counter_wakeup(counter);
+	}
+}
+
 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
 
 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
@@ -1715,8 +1732,9 @@ struct perf_output_handle {
 static inline void __perf_output_wakeup(struct perf_output_handle *handle)
 {
 	if (handle->nmi) {
+		handle->counter->pending_wakeup = 1;
 		perf_pending_queue(&handle->counter->pending,
-				   perf_pending_wakeup);
+				   perf_pending_counter);
 	} else
 		perf_counter_wakeup(handle->counter);
 }
@@ -2063,8 +2081,21 @@ void perf_counter_munmap(unsigned long addr, unsigned long len,
 int perf_counter_overflow(struct perf_counter *counter,
 			  int nmi, struct pt_regs *regs)
 {
+	int events = atomic_read(&counter->event_limit);
+	int ret = 0;
+
+	if (events && atomic_dec_and_test(&counter->event_limit)) {
+		ret = 1;
+		if (nmi) {
+			counter->pending_disable = 1;
+			perf_pending_queue(&counter->pending,
+					   perf_pending_counter);
+		} else
+			perf_counter_disable(counter);
+	}
+
 	perf_counter_output(counter, nmi, regs);
-	return 0;
+	return ret;
 }
 
 /*
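[Editor's sketch, again illustrative rather than part of the patch: since perf_counter_wakeup() signals fasync owners with SIGIO (see the kill_fasync() call above), the new REFRESH ioctl composes naturally with O_ASYNC delivery -- take one overflow at a time and re-arm from the signal handler. The helper name arm_one_shot_sampling and the use of a global fd are hypothetical.]

	#include <fcntl.h>
	#include <signal.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/perf_counter.h>

	static int counter_fd;		/* opened as in the earlier sketch */

	static void sigio_handler(int sig)
	{
		/* the counter disabled itself on overflow; re-arm for one more */
		ioctl(counter_fd, PERF_COUNTER_IOC_REFRESH, 1);
	}

	static void arm_one_shot_sampling(int fd)
	{
		counter_fd = fd;
		signal(SIGIO, sigio_handler);
		/* route the counter's fasync wakeups to this process as SIGIO */
		fcntl(fd, F_SETOWN, getpid());
		fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
		/* arm the first overflow */
		ioctl(fd, PERF_COUNTER_IOC_REFRESH, 1);
	}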