Diffstat (limited to 'kernel'):

 -rw-r--r--  kernel/perf_counter.c | 338 +++++++++++++++++++++++-----------
 1 file changed, 236 insertions(+), 102 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index b0b20a07f394..534e20d14d63 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -88,6 +88,7 @@ void __weak hw_perf_disable(void)		{ barrier(); }
 void __weak hw_perf_enable(void)		{ barrier(); }
 
 void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
+void __weak hw_perf_counter_setup_online(int cpu)	{ barrier(); }
 
 int __weak
 hw_perf_group_sched_in(struct perf_counter *group_leader,
@@ -306,6 +307,10 @@ counter_sched_out(struct perf_counter *counter,
 		return;
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
+	if (counter->pending_disable) {
+		counter->pending_disable = 0;
+		counter->state = PERF_COUNTER_STATE_OFF;
+	}
 	counter->tstamp_stopped = ctx->time;
 	counter->pmu->disable(counter);
 	counter->oncpu = -1;
@@ -1691,7 +1696,32 @@ static int perf_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static u64 perf_counter_read_tree(struct perf_counter *counter)
+static int perf_counter_read_size(struct perf_counter *counter)
+{
+	int entry = sizeof(u64); /* value */
+	int size = 0;
+	int nr = 1;
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_ID)
+		entry += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_GROUP) {
+		nr += counter->group_leader->nr_siblings;
+		size += sizeof(u64);
+	}
+
+	size += entry * nr;
+
+	return size;
+}
+
+static u64 perf_counter_read_value(struct perf_counter *counter)
 {
 	struct perf_counter *child;
 	u64 total = 0;
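
Note: perf_counter_read_size() bounds what a subsequent read() may return. A worked example under the new format (u64 is 8 bytes): with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED and a leader that has two siblings, each entry is 8 (value) + 8 (id) = 16 bytes, nr = 1 + 2 = 3, so size = 8 (time_enabled) + 8 (the group's nr word) + 3 * 16 = 64 bytes.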
@@ -1703,14 +1733,96 @@ static u64 perf_counter_read_tree(struct perf_counter *counter)
 	return total;
 }
 
+static int perf_counter_read_entry(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	int n = 0, count = 0;
+	u64 values[2];
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	count = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, count))
+		return -EFAULT;
+
+	return count;
+}
+
+static int perf_counter_read_group(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	int n = 0, size = 0, err = -EFAULT;
+	u64 values[3];
+
+	values[n++] = 1 + leader->nr_siblings;
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = leader->total_time_enabled +
+			atomic64_read(&leader->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = leader->total_time_running +
+			atomic64_read(&leader->child_total_time_running);
+	}
+
+	size = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, size))
+		return -EFAULT;
+
+	err = perf_counter_read_entry(leader, read_format, buf + size);
+	if (err < 0)
+		return err;
+
+	size += err;
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		err = perf_counter_read_entry(counter, read_format,
+				buf + size);
+		if (err < 0)
+			return err;
+
+		size += err;
+	}
+
+	return size;
+}
+
+static int perf_counter_read_one(struct perf_counter *counter,
+				 u64 read_format, char __user *buf)
+{
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	if (copy_to_user(buf, values, n * sizeof(u64)))
+		return -EFAULT;
+
+	return n * sizeof(u64);
+}
+
 /*
  * Read the performance counter - simple non blocking version for now
  */
 static ssize_t
 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 {
-	u64 values[4];
-	int n;
+	u64 read_format = counter->attr.read_format;
+	int ret;
 
 	/*
 	 * Return end-of-file for a read on a counter that is in
@@ -1720,28 +1832,18 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	if (counter->state == PERF_COUNTER_STATE_ERROR)
 		return 0;
 
+	if (count < perf_counter_read_size(counter))
+		return -ENOSPC;
+
 	WARN_ON_ONCE(counter->ctx->parent_ctx);
 	mutex_lock(&counter->child_mutex);
-	values[0] = perf_counter_read_tree(counter);
-	n = 1;
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = counter->total_time_enabled +
-			atomic64_read(&counter->child_total_time_enabled);
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = counter->total_time_running +
-			atomic64_read(&counter->child_total_time_running);
-	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = primary_counter_id(counter);
+	if (read_format & PERF_FORMAT_GROUP)
+		ret = perf_counter_read_group(counter, read_format, buf);
+	else
+		ret = perf_counter_read_one(counter, read_format, buf);
 	mutex_unlock(&counter->child_mutex);
 
-	if (count < n * sizeof(u64))
-		return -EINVAL;
-	count = n * sizeof(u64);
-
-	if (copy_to_user(buf, values, count))
-		return -EFAULT;
-
-	return count;
+	return ret;
 }
 
 static ssize_t
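
Note: the new read() ABI is self-describing. A sketch of the buffer layout emitted by perf_counter_read_one() and perf_counter_read_group() above (derived from this patch, not copied from a kernel header, so treat the struct names as illustrative):

	/* read_format without PERF_FORMAT_GROUP -- perf_counter_read_one() */
	struct read_one_layout {
		u64 value;		/* counter value, incl. child counts */
		u64 time_enabled;	/* iff PERF_FORMAT_TOTAL_TIME_ENABLED */
		u64 time_running;	/* iff PERF_FORMAT_TOTAL_TIME_RUNNING */
		u64 id;			/* iff PERF_FORMAT_ID */
	};

	/* read_format with PERF_FORMAT_GROUP -- perf_counter_read_group() */
	struct read_group_layout {
		u64 nr;			/* 1 + leader->nr_siblings */
		u64 time_enabled;	/* iff PERF_FORMAT_TOTAL_TIME_ENABLED */
		u64 time_running;	/* iff PERF_FORMAT_TOTAL_TIME_RUNNING */
		struct {
			u64 value;
			u64 id;		/* iff PERF_FORMAT_ID */
		} cnt[];		/* nr entries: leader, then siblings */
	};

Also note the error-code change: a too-small user buffer now gets -ENOSPC where the old code returned -EINVAL.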
@@ -2245,7 +2347,7 @@ static void perf_pending_counter(struct perf_pending_entry *entry)
 
 	if (counter->pending_disable) {
 		counter->pending_disable = 0;
-		perf_counter_disable(counter);
+		__perf_counter_disable(counter);
 	}
 
 	if (counter->pending_wakeup) {
@@ -2630,7 +2732,80 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
 	return task_pid_nr_ns(p, counter->ns);
 }
 
-static void perf_counter_output(struct perf_counter *counter, int nmi,
+static void perf_output_read_one(struct perf_output_handle *handle,
+				 struct perf_counter *counter)
+{
+	u64 read_format = counter->attr.read_format;
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = atomic64_read(&counter->count);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+}
+
+/*
+ * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
+ */
+static void perf_output_read_group(struct perf_output_handle *handle,
+				   struct perf_counter *counter)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	u64 read_format = counter->attr.read_format;
+	u64 values[5];
+	int n = 0;
+
+	values[n++] = 1 + leader->nr_siblings;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		values[n++] = leader->total_time_enabled;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		values[n++] = leader->total_time_running;
+
+	if (leader != counter)
+		leader->pmu->read(leader);
+
+	values[n++] = atomic64_read(&leader->count);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(leader);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		n = 0;
+
+		if (sub != counter)
+			sub->pmu->read(sub);
+
+		values[n++] = atomic64_read(&sub->count);
+		if (read_format & PERF_FORMAT_ID)
+			values[n++] = primary_counter_id(sub);
+
+		perf_output_copy(handle, values, n * sizeof(u64));
+	}
+}
+
+static void perf_output_read(struct perf_output_handle *handle,
+			     struct perf_counter *counter)
+{
+	if (counter->attr.read_format & PERF_FORMAT_GROUP)
+		perf_output_read_group(handle, counter);
+	else
+		perf_output_read_one(handle, counter);
+}
+
+void perf_counter_output(struct perf_counter *counter, int nmi,
 			struct perf_sample_data *data)
 {
 	int ret;
@@ -2641,10 +2816,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	struct {
 		u32 pid, tid;
 	} tid_entry;
-	struct {
-		u64 id;
-		u64 counter;
-	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
 	int callchain_size = 0;
 	u64 time;
@@ -2699,10 +2870,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		header.size += sizeof(u64);
 
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		header.size += sizeof(u64) +
-			counter->nr_siblings * sizeof(group_entry);
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		header.size += perf_counter_read_size(counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		callchain = perf_callchain(data->regs);
@@ -2759,26 +2928,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		perf_output_put(&handle, data->period);
 
-	/*
-	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
-	 */
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		struct perf_counter *leader, *sub;
-		u64 nr = counter->nr_siblings;
-
-		perf_output_put(&handle, nr);
-
-		leader = counter->group_leader;
-		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-			if (sub != counter)
-				sub->pmu->read(sub);
-
-			group_entry.id = primary_counter_id(sub);
-			group_entry.counter = atomic64_read(&sub->count);
-
-			perf_output_put(&handle, group_entry);
-		}
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		perf_output_read(&handle, counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		if (callchain)
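
Note: with PERF_SAMPLE_GROUP gone, a PERF_SAMPLE_READ sample carries the same self-describing body via perf_output_read(). A minimal userspace walker for the group variant, assuming read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID and 'p' already pointing at the read body inside the sample (an illustrative sketch, not part of this patch):

	#include <stdio.h>

	static const unsigned long long *
	parse_sample_read_group(const unsigned long long *p)
	{
		unsigned long long nr = *p++;	/* 1 + nr_siblings */

		for (unsigned long long i = 0; i < nr; i++) {
			unsigned long long value = *p++;
			unsigned long long id	 = *p++; /* PERF_FORMAT_ID */

			printf("counter id %llu: value %llu\n", id, value);
		}
		return p;	/* first word after the read body */
	}

One behavioural wrinkle visible above: perf_output_read_group() emits the leader's raw total_time_enabled/total_time_running without the child_* aggregates, per the XXX about PERF_FORMAT_GROUP vs inherited counters.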
@@ -2817,8 +2968,6 @@ struct perf_read_event {
 
 	u32 pid;
 	u32 tid;
-	u64 value;
-	u64 format[3];
 };
 
 static void
@@ -2830,34 +2979,20 @@ perf_counter_read_event(struct perf_counter *counter,
 		.header = {
 			.type = PERF_EVENT_READ,
 			.misc = 0,
-			.size = sizeof(event) - sizeof(event.format),
+			.size = sizeof(event) + perf_counter_read_size(counter),
 		},
 		.pid = perf_counter_pid(counter, task),
 		.tid = perf_counter_tid(counter, task),
-		.value = atomic64_read(&counter->count),
 	};
-	int ret, i = 0;
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_enabled;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_running;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = primary_counter_id(counter);
-	}
+	int ret;
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
 	if (ret)
 		return;
 
-	perf_output_copy(&handle, &event, event.header.size);
+	perf_output_put(&handle, event);
+	perf_output_read(&handle, counter);
+
 	perf_output_end(&handle);
 }
 
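Note: after this change a PERF_EVENT_READ record is a fixed prefix plus the same read format, with header.size precomputed via perf_counter_read_size(). The wire layout, sketched from perf_counter_read_event() above (struct name illustrative):

	struct perf_read_event_record {
		struct perf_event_header header; /* .size covers the read body */
		u32 pid;
		u32 tid;
		/* followed by the perf_output_read() payload, laid out
		 * according to counter->attr.read_format as sketched earlier */
	};
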
@@ -2893,10 +3028,10 @@ static void perf_counter_task_output(struct perf_counter *counter,
 		return;
 
 	task_event->event.pid = perf_counter_pid(counter, task);
-	task_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+	task_event->event.ppid = perf_counter_pid(counter, current);
 
 	task_event->event.tid = perf_counter_tid(counter, task);
-	task_event->event.ptid = perf_counter_tid(counter, task->real_parent);
+	task_event->event.ptid = perf_counter_tid(counter, current);
 
 	perf_output_put(&handle, task_event->event);
 	perf_output_end(&handle);
@@ -3443,40 +3578,32 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
 {
-	struct perf_counter_context *ctx;
-	unsigned long flags;
-	int count;
-
+	/*
+	 * The counter is active, we're good!
+	 */
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		return 1;
 
+	/*
+	 * The counter is off/error, not counting.
+	 */
 	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
 		return 0;
 
 	/*
-	 * If the counter is inactive, it could be just because
-	 * its task is scheduled out, or because it's in a group
-	 * which could not go on the PMU. We want to count in
-	 * the first case but not the second. If the context is
-	 * currently active then an inactive software counter must
-	 * be the second case. If it's not currently active then
-	 * we need to know whether the counter was active when the
-	 * context was last active, which we can determine by
-	 * comparing counter->tstamp_stopped with ctx->time.
-	 *
-	 * We are within an RCU read-side critical section,
-	 * which protects the existence of *ctx.
+	 * The counter is inactive, if the context is active
+	 * we're part of a group that didn't make it on the 'pmu',
+	 * not counting.
 	 */
-	ctx = counter->ctx;
-	spin_lock_irqsave(&ctx->lock, flags);
-	count = 1;
-	/* Re-check state now we have the lock */
-	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
-	    counter->ctx->is_active ||
-	    counter->tstamp_stopped < ctx->time)
-		count = 0;
-	spin_unlock_irqrestore(&ctx->lock, flags);
-	return count;
+	if (counter->ctx->is_active)
+		return 0;
+
+	/*
+	 * We're inactive and the context is too, this means the
+	 * task is scheduled out, we're counting events that happen
+	 * to us, like migration events.
+	 */
+	return 1;
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
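
Note: the rewritten perf_swcounter_is_counting() boils down to this decision table (restating the code above):

	state == ACTIVE			-> 1  counting
	state == OFF or ERROR		-> 0  not counting
	state == INACTIVE, ctx active	-> 0  group did not fit on the PMU
	state == INACTIVE, ctx inactive	-> 1  task is scheduled out; still
					      count events (e.g. migrations)
					      that target the task

The old version took ctx->lock and compared tstamp_stopped against ctx->time to tell the last two cases apart; the new one relies on ctx->is_active alone.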
@@ -3928,9 +4055,9 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	atomic64_set(&hwc->period_left, hwc->sample_period);
 
 	/*
-	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
+	 * we currently do not support PERF_FORMAT_GROUP on inherited counters
 	 */
-	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
+	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
 		goto done;
 
 	switch (attr->type) {
@@ -4592,6 +4719,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 		perf_counter_init_cpu(cpu);
 		break;
 
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		hw_perf_counter_setup_online(cpu);
+		break;
+
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		perf_counter_exit_cpu(cpu);
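
Note: the new CPU_ONLINE leg calls the hw_perf_counter_setup_online() weak stub added at the top of this diff, and perf_counter_init() below replays CPU_ONLINE for the boot CPU. An architecture needing late per-CPU PMU setup would override the stub; a hypothetical sketch, not part of this patch:

	/* arch/<arch>/kernel/perf_counter.c -- hypothetical override */
	void hw_perf_counter_setup_online(int cpu)
	{
		/* late initialisation that must run once 'cpu' is fully
		 * online, e.g. allocating per-CPU sampling buffers */
	}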
@@ -4616,6 +4748,8 @@ void __init perf_counter_init(void)
 {
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
+	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
+			(void *)(long)smp_processor_id());
 	register_cpu_notifier(&perf_cpu_nb);
 }
 