author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-04-06 05:45:02 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-04-07 04:48:55 -0400
commit     671dec5daf3b3c43c5777be282f00120a44cf37f (patch)
tree       a7563d041c7c0bd87d6d118e1063f9aa50ff5fec /kernel/perf_counter.c
parent     3c446b3d3b38f991f97e9d2df0ad26a60a94dcff (diff)
perf_counter: generalize pending infrastructure
Prepare the pending infrastructure to do more than wakeups.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094517.634732847@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 53 ++++++++++++++++++++++++++++++++---------------------
 1 file changed, 32 insertions(+), 21 deletions(-)
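The heart of the change is the same per-CPU, NMI-safe, singly linked list as before: entries are claimed and pushed with cmpxchg() and the whole list is drained with xchg(). What is new is that each entry now carries its own callback instead of being hardwired to a wakeup. The sketch below is an illustration only, not kernel code: it models the same push/drain pattern in userspace C, substituting GCC's __sync builtins for the kernel's cmpxchg()/xchg() and omitting the per-CPU handling and memory barriers; every identifier in it is invented for the example.

#include <stddef.h>
#include <stdio.h>

struct pending_entry {
	struct pending_entry *next;
	void (*func)(struct pending_entry *);
};

/* Sentinel terminating the list; NULL means "not queued". */
#define PENDING_TAIL ((struct pending_entry *)-1UL)

static struct pending_entry *pending_head = PENDING_TAIL;

static void pending_queue(struct pending_entry *entry,
			  void (*func)(struct pending_entry *))
{
	struct pending_entry *prev;

	/*
	 * Claim the entry: NULL -> PENDING_TAIL. A non-NULL ->next
	 * means it is already on some list, so queueing is idempotent.
	 */
	if (__sync_val_compare_and_swap(&entry->next, NULL,
					PENDING_TAIL) != NULL)
		return;

	entry->func = func;

	/* Lockless push onto the head; retry if another pusher raced us. */
	do {
		prev = pending_head;
		entry->next = prev;
	} while (__sync_val_compare_and_swap(&pending_head, prev,
					     entry) != prev);
}

static int pending_run(void)
{
	/* Steal the whole list atomically, then walk it at leisure. */
	struct pending_entry *list =
		__sync_lock_test_and_set(&pending_head, PENDING_TAIL);
	int nr = 0;

	while (list != PENDING_TAIL) {
		struct pending_entry *entry = list;
		void (*func)(struct pending_entry *) = entry->func;

		list = list->next;
		entry->next = NULL;	/* mark unqueued before calling */
		func(entry);
		nr++;
	}
	return nr;
}

static void do_wakeup(struct pending_entry *entry)
{
	(void)entry;
	printf("wakeup callback ran\n");
}

int main(void)
{
	struct pending_entry e = { NULL, NULL };

	pending_queue(&e, do_wakeup);
	printf("ran %d callbacks\n", pending_run());
	return 0;
}

Swinging ->next from NULL to PENDING_TAIL is what makes queueing from NMI context safe: an entry already in flight is simply left alone, which is also why the list terminates in PENDING_TAIL rather than NULL.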
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index c58cc64319e1..0a2ade2e4f11 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1581,6 +1581,14 @@ void perf_counter_wakeup(struct perf_counter *counter)
 	kill_fasync(&counter->fasync, SIGIO, POLL_IN);
 }
 
+static void perf_pending_wakeup(struct perf_pending_entry *entry)
+{
+	struct perf_counter *counter = container_of(entry,
+			struct perf_counter, pending);
+
+	perf_counter_wakeup(counter);
+}
+
 /*
  * Pending wakeups
  *
@@ -1590,45 +1598,47 @@ void perf_counter_wakeup(struct perf_counter *counter)
  * single linked list and use cmpxchg() to add entries lockless.
  */
 
-#define PENDING_TAIL ((struct perf_wakeup_entry *)-1UL)
+#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
 
-static DEFINE_PER_CPU(struct perf_wakeup_entry *, perf_wakeup_head) = {
+static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
 	PENDING_TAIL,
 };
 
-static void perf_pending_queue(struct perf_counter *counter)
+static void perf_pending_queue(struct perf_pending_entry *entry,
+			       void (*func)(struct perf_pending_entry *))
 {
-	struct perf_wakeup_entry **head;
-	struct perf_wakeup_entry *prev, *next;
+	struct perf_pending_entry **head;
 
-	if (cmpxchg(&counter->wakeup.next, NULL, PENDING_TAIL) != NULL)
+	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
 		return;
 
-	head = &get_cpu_var(perf_wakeup_head);
+	entry->func = func;
+
+	head = &get_cpu_var(perf_pending_head);
 
 	do {
-		prev = counter->wakeup.next = *head;
-		next = &counter->wakeup;
-	} while (cmpxchg(head, prev, next) != prev);
+		entry->next = *head;
+	} while (cmpxchg(head, entry->next, entry) != entry->next);
 
 	set_perf_counter_pending();
 
-	put_cpu_var(perf_wakeup_head);
+	put_cpu_var(perf_pending_head);
 }
 
 static int __perf_pending_run(void)
 {
-	struct perf_wakeup_entry *list;
+	struct perf_pending_entry *list;
 	int nr = 0;
 
-	list = xchg(&__get_cpu_var(perf_wakeup_head), PENDING_TAIL);
+	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
 	while (list != PENDING_TAIL) {
-		struct perf_counter *counter = container_of(list,
-				struct perf_counter, wakeup);
+		void (*func)(struct perf_pending_entry *);
+		struct perf_pending_entry *entry = list;
 
 		list = list->next;
 
-		counter->wakeup.next = NULL;
+		func = entry->func;
+		entry->next = NULL;
 		/*
 		 * Ensure we observe the unqueue before we issue the wakeup,
 		 * so that we won't be waiting forever.
@@ -1636,7 +1646,7 @@ static int __perf_pending_run(void)
 		 */
 		smp_wmb();
 
-		perf_counter_wakeup(counter);
+		func(entry);
 		nr++;
 	}
 
@@ -1658,7 +1668,7 @@ static inline int perf_not_pending(struct perf_counter *counter)
 	 * so that we do not miss the wakeup. -- see perf_pending_handle()
 	 */
 	smp_rmb();
-	return counter->wakeup.next == NULL;
+	return counter->pending.next == NULL;
 }
 
 static void perf_pending_sync(struct perf_counter *counter)
@@ -1695,9 +1705,10 @@ struct perf_output_handle {
 
 static inline void __perf_output_wakeup(struct perf_output_handle *handle)
 {
-	if (handle->nmi)
-		perf_pending_queue(handle->counter);
-	else
+	if (handle->nmi) {
+		perf_pending_queue(&handle->counter->pending,
+				   perf_pending_wakeup);
+	} else
 		perf_counter_wakeup(handle->counter);
 }
 
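With the callback stored in the entry, a second kind of deferred work needs no new list machinery, only its own entry and handler. Continuing the userspace sketch above (this fragment is not part of the patch; do_disable, fake_counter, and fake_nmi_handler are invented for illustration):

/* A hypothetical second user of the same queue: deferred disable. */
static void do_disable(struct pending_entry *entry)
{
	(void)entry;
	printf("disable callback ran\n");
}

struct fake_counter {
	struct pending_entry pending;		/* wakeup work */
	struct pending_entry pending_disable;	/* a second kind of work */
};

static void fake_nmi_handler(struct fake_counter *c)
{
	/*
	 * Entries must start out with ->next == NULL ("not queued").
	 * Both kinds of work then share pending_queue()/pending_run().
	 */
	pending_queue(&c->pending, do_wakeup);
	pending_queue(&c->pending_disable, do_disable);
}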