 include/linux/perf_counter.h |  7
 kernel/perf_counter.c        | 53
 2 files changed, 36 insertions(+), 24 deletions(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 8d5d11b8d011..977fb15a53f3 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -321,8 +321,9 @@ struct perf_mmap_data {
 	void *data_pages[0];
 };
 
-struct perf_wakeup_entry {
-	struct perf_wakeup_entry *next;
+struct perf_pending_entry {
+	struct perf_pending_entry *next;
+	void (*func)(struct perf_pending_entry *);
 };
 
 /**
@@ -401,7 +402,7 @@ struct perf_counter {
 	wait_queue_head_t waitq;
 	struct fasync_struct *fasync;
 	/* optional: for NMIs */
-	struct perf_wakeup_entry wakeup;
+	struct perf_pending_entry pending;
 
 	void (*destroy)(struct perf_counter *);
 	struct rcu_head rcu_head;
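
The header change is the whole interface: perf_pending_entry is now a generic callback hook rather than a wakeup-only link, and whoever embeds it recovers the containing object inside the callback with container_of() (as perf_pending_wakeup() does in the C file below). A minimal user-space sketch of that embed-and-recover pattern, with an illustrative my_counter type standing in for struct perf_counter:

#include <stddef.h>
#include <stdio.h>

/* Mirrors the patched struct: a list link plus a per-entry callback. */
struct pending_entry {
	struct pending_entry *next;
	void (*func)(struct pending_entry *);
};

/* Illustrative container; stands in for struct perf_counter. */
struct my_counter {
	int id;
	struct pending_entry pending;	/* embedded, not pointed-to */
};

/* User-space stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The callback only sees the embedded entry and works back to its owner. */
static void my_counter_wakeup(struct pending_entry *entry)
{
	struct my_counter *c = container_of(entry, struct my_counter, pending);

	printf("wakeup for counter %d\n", c->id);
}

int main(void)
{
	struct my_counter c = { .id = 42 };

	c.pending.func = my_counter_wakeup;
	c.pending.func(&c.pending);	/* a real queue would invoke this later */
	return 0;
}
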
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index c58cc64319e1..0a2ade2e4f11 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1581,6 +1581,14 @@ void perf_counter_wakeup(struct perf_counter *counter)
 	kill_fasync(&counter->fasync, SIGIO, POLL_IN);
 }
 
+static void perf_pending_wakeup(struct perf_pending_entry *entry)
+{
+	struct perf_counter *counter = container_of(entry,
+			struct perf_counter, pending);
+
+	perf_counter_wakeup(counter);
+}
+
 /*
  * Pending wakeups
  *
@@ -1590,45 +1598,47 @@ void perf_counter_wakeup(struct perf_counter *counter)
  * single linked list and use cmpxchg() to add entries lockless.
  */
 
-#define PENDING_TAIL ((struct perf_wakeup_entry *)-1UL)
+#define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
 
-static DEFINE_PER_CPU(struct perf_wakeup_entry *, perf_wakeup_head) = {
+static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
 	PENDING_TAIL,
 };
 
-static void perf_pending_queue(struct perf_counter *counter)
+static void perf_pending_queue(struct perf_pending_entry *entry,
+		void (*func)(struct perf_pending_entry *))
 {
-	struct perf_wakeup_entry **head;
-	struct perf_wakeup_entry *prev, *next;
+	struct perf_pending_entry **head;
 
-	if (cmpxchg(&counter->wakeup.next, NULL, PENDING_TAIL) != NULL)
+	if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
 		return;
 
-	head = &get_cpu_var(perf_wakeup_head);
+	entry->func = func;
+
+	head = &get_cpu_var(perf_pending_head);
 
 	do {
-		prev = counter->wakeup.next = *head;
-		next = &counter->wakeup;
-	} while (cmpxchg(head, prev, next) != prev);
+		entry->next = *head;
+	} while (cmpxchg(head, entry->next, entry) != entry->next);
 
 	set_perf_counter_pending();
 
-	put_cpu_var(perf_wakeup_head);
+	put_cpu_var(perf_pending_head);
 }
 
 static int __perf_pending_run(void)
 {
-	struct perf_wakeup_entry *list;
+	struct perf_pending_entry *list;
 	int nr = 0;
 
-	list = xchg(&__get_cpu_var(perf_wakeup_head), PENDING_TAIL);
+	list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
 	while (list != PENDING_TAIL) {
-		struct perf_counter *counter = container_of(list,
-				struct perf_counter, wakeup);
+		void (*func)(struct perf_pending_entry *);
+		struct perf_pending_entry *entry = list;
 
 		list = list->next;
 
-		counter->wakeup.next = NULL;
+		func = entry->func;
+		entry->next = NULL;
 		/*
 		 * Ensure we observe the unqueue before we issue the wakeup,
 		 * so that we won't be waiting forever.
@@ -1636,7 +1646,7 @@ static int __perf_pending_run(void)
 		 */
 		smp_wmb();
 
-		perf_counter_wakeup(counter);
+		func(entry);
 		nr++;
 	}
 
@@ -1658,7 +1668,7 @@ static inline int perf_not_pending(struct perf_counter *counter)
 	 * so that we do not miss the wakeup. -- see perf_pending_handle()
 	 */
 	smp_rmb();
-	return counter->wakeup.next == NULL;
+	return counter->pending.next == NULL;
 }
 
 static void perf_pending_sync(struct perf_counter *counter)
@@ -1695,9 +1705,10 @@ struct perf_output_handle {
 
 static inline void __perf_output_wakeup(struct perf_output_handle *handle)
 {
-	if (handle->nmi)
-		perf_pending_queue(handle->counter);
-	else
+	if (handle->nmi) {
+		perf_pending_queue(&handle->counter->pending,
+				perf_pending_wakeup);
+	} else
 		perf_counter_wakeup(handle->counter);
 }
 
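
The queue these entries go on is the lockless part of the patch: a producer claims an entry by flipping its ->next from NULL to the PENDING_TAIL sentinel, pushes it onto a per-CPU singly linked list with a cmpxchg() retry loop, and the consumer later drains the entire list with one xchg() before walking it and invoking each entry's callback. A minimal user-space sketch of that push/drain pattern, assuming C11 atomics in place of cmpxchg()/xchg() and a single global head in place of the per-CPU perf_pending_head (all names outside the patch are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct pending_entry {
	_Atomic(struct pending_entry *) next;
	void (*func)(struct pending_entry *);
};

/* Sentinel: marks the end of the list and flags an entry as queued. */
#define PENDING_TAIL ((struct pending_entry *)-1UL)

/* Single list head instead of the kernel's per-CPU heads. */
static _Atomic(struct pending_entry *) pending_head;

static void pending_queue(struct pending_entry *entry,
			  void (*func)(struct pending_entry *))
{
	struct pending_entry *expected = NULL;
	struct pending_entry *head;

	/* Claim the entry: a non-NULL ->next means it is already queued. */
	if (!atomic_compare_exchange_strong(&entry->next, &expected,
					    PENDING_TAIL))
		return;

	entry->func = func;

	/* Lockless push onto the list head; retry if another producer won. */
	head = atomic_load(&pending_head);
	do {
		atomic_store(&entry->next, head);
	} while (!atomic_compare_exchange_weak(&pending_head, &head, entry));
}

static int pending_run(void)
{
	struct pending_entry *list;
	int nr = 0;

	/* Grab the whole list at once; later pushes start a fresh list. */
	list = atomic_exchange(&pending_head, PENDING_TAIL);
	while (list != PENDING_TAIL) {
		struct pending_entry *entry = list;
		void (*func)(struct pending_entry *) = entry->func;

		list = atomic_load(&entry->next);

		/*
		 * Mark the entry unqueued before invoking the callback so it
		 * can be requeued from inside func(); the kernel issues a
		 * write barrier here for the same reason.
		 */
		atomic_store(&entry->next, NULL);

		func(entry);
		nr++;
	}

	return nr;
}

static void say_hello(struct pending_entry *entry)
{
	(void)entry;
	printf("pending callback ran\n");
}

int main(void)
{
	static struct pending_entry e;	/* ->next starts out NULL */

	atomic_init(&pending_head, PENDING_TAIL);
	pending_queue(&e, say_hello);
	printf("ran %d callback(s)\n", pending_run());
	return 0;
}
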