Diffstat (limited to 'kernel/perf_counter.c')
 -rw-r--r--  kernel/perf_counter.c | 366
 1 file changed, 259 insertions(+), 107 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index bf8110b35c51..534e20d14d63 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -307,6 +307,10 @@ counter_sched_out(struct perf_counter *counter,
 		return;
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
+	if (counter->pending_disable) {
+		counter->pending_disable = 0;
+		counter->state = PERF_COUNTER_STATE_OFF;
+	}
 	counter->tstamp_stopped = ctx->time;
 	counter->pmu->disable(counter);
 	counter->oncpu = -1;
@@ -1692,7 +1696,32 @@ static int perf_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static u64 perf_counter_read_tree(struct perf_counter *counter)
+static int perf_counter_read_size(struct perf_counter *counter)
+{
+	int entry = sizeof(u64); /* value */
+	int size = 0;
+	int nr = 1;
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_ID)
+		entry += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_GROUP) {
+		nr += counter->group_leader->nr_siblings;
+		size += sizeof(u64);
+	}
+
+	size += entry * nr;
+
+	return size;
+}
+
+static u64 perf_counter_read_value(struct perf_counter *counter)
 {
 	struct perf_counter *child;
 	u64 total = 0;
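Note (not part of the patch): perf_counter_read_size() above computes how many bytes a read() on the counter will return for a given read_format. A minimal userspace sketch of the same arithmetic, with a hypothetical helper name and an illustrative flag combination, assuming the PERF_FORMAT_* values from include/linux/perf_counter.h:

#include <stdio.h>
#include <stdint.h>

#define PERF_FORMAT_TOTAL_TIME_ENABLED	(1U << 0)
#define PERF_FORMAT_TOTAL_TIME_RUNNING	(1U << 1)
#define PERF_FORMAT_ID			(1U << 2)
#define PERF_FORMAT_GROUP		(1U << 3)

/* Hypothetical userspace mirror of perf_counter_read_size(). */
static size_t read_size(uint64_t read_format, unsigned int nr_siblings)
{
	size_t entry = sizeof(uint64_t);	/* each value is one u64 */
	size_t size = 0;
	size_t nr = 1;				/* the counter itself */

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(uint64_t);	/* every value carries its id */
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;		/* one entry per group member */
		size += sizeof(uint64_t);	/* leading 'nr' field */
	}
	return size + entry * nr;
}

int main(void)
{
	/* Leader plus two siblings, both time fields and ids:
	 * nr + enabled + running + 3 * (value + id) = 9 u64s = 72 bytes. */
	printf("%zu\n", read_size(PERF_FORMAT_TOTAL_TIME_ENABLED |
				  PERF_FORMAT_TOTAL_TIME_RUNNING |
				  PERF_FORMAT_ID | PERF_FORMAT_GROUP, 2));
	return 0;
}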
@@ -1704,14 +1733,96 @@ static u64 perf_counter_read_tree(struct perf_counter *counter)
 	return total;
 }
 
+static int perf_counter_read_entry(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	int n = 0, count = 0;
+	u64 values[2];
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	count = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, count))
+		return -EFAULT;
+
+	return count;
+}
+
+static int perf_counter_read_group(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	int n = 0, size = 0, err = -EFAULT;
+	u64 values[3];
+
+	values[n++] = 1 + leader->nr_siblings;
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = leader->total_time_enabled +
+			atomic64_read(&leader->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = leader->total_time_running +
+			atomic64_read(&leader->child_total_time_running);
+	}
+
+	size = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, size))
+		return -EFAULT;
+
+	err = perf_counter_read_entry(leader, read_format, buf + size);
+	if (err < 0)
+		return err;
+
+	size += err;
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		err = perf_counter_read_entry(counter, read_format,
+				buf + size);
+		if (err < 0)
+			return err;
+
+		size += err;
+	}
+
+	return size;
+}
+
+static int perf_counter_read_one(struct perf_counter *counter,
+				 u64 read_format, char __user *buf)
+{
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	if (copy_to_user(buf, values, n * sizeof(u64)))
+		return -EFAULT;
+
+	return n * sizeof(u64);
+}
+
 /*
  * Read the performance counter - simple non blocking version for now
  */
 static ssize_t
 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 {
-	u64 values[4];
-	int n;
+	u64 read_format = counter->attr.read_format;
+	int ret;
 
 	/*
 	 * Return end-of-file for a read on a counter that is in
@@ -1721,28 +1832,18 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	if (counter->state == PERF_COUNTER_STATE_ERROR)
 		return 0;
 
+	if (count < perf_counter_read_size(counter))
+		return -ENOSPC;
+
 	WARN_ON_ONCE(counter->ctx->parent_ctx);
 	mutex_lock(&counter->child_mutex);
-	values[0] = perf_counter_read_tree(counter);
-	n = 1;
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = counter->total_time_enabled +
-			atomic64_read(&counter->child_total_time_enabled);
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = counter->total_time_running +
-			atomic64_read(&counter->child_total_time_running);
-	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = primary_counter_id(counter);
+	if (read_format & PERF_FORMAT_GROUP)
+		ret = perf_counter_read_group(counter, read_format, buf);
+	else
+		ret = perf_counter_read_one(counter, read_format, buf);
 	mutex_unlock(&counter->child_mutex);
 
-	if (count < n * sizeof(u64))
-		return -EINVAL;
-	count = n * sizeof(u64);
-
-	if (copy_to_user(buf, values, count))
-		return -EFAULT;
-
-	return count;
+	return ret;
 }
 
 static ssize_t
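Note (not part of the patch): with this hunk, a read() whose buffer is smaller than perf_counter_read_size() now fails with -ENOSPC instead of -EINVAL, and a PERF_FORMAT_GROUP read returns the whole group in one call. A minimal consumer-side sketch of parsing that layout, assuming a group leader opened with PERF_FORMAT_GROUP | PERF_FORMAT_ID and no time fields (the function name and buffer sizing are illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Layout written by perf_counter_read_group() above for
 * PERF_FORMAT_GROUP | PERF_FORMAT_ID (no time fields):
 *
 *	u64 nr;
 *	{ u64 value; u64 id; } [nr]
 */
void dump_group(int group_fd)
{
	uint64_t buf[1 + 2 * 64];		/* assumes at most 64 members */
	ssize_t ret = read(group_fd, buf, sizeof(buf));

	if (ret < 0) {
		perror("read");
		return;
	}

	for (uint64_t i = 0; i < buf[0]; i++) {
		uint64_t value = buf[1 + 2 * i];
		uint64_t id    = buf[2 + 2 * i];
		printf("counter %llu: %llu\n",
		       (unsigned long long)id, (unsigned long long)value);
	}
}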
@@ -2246,7 +2347,7 @@ static void perf_pending_counter(struct perf_pending_entry *entry)
 
 	if (counter->pending_disable) {
 		counter->pending_disable = 0;
-		perf_counter_disable(counter);
+		__perf_counter_disable(counter);
 	}
 
 	if (counter->pending_wakeup) {
@@ -2631,6 +2732,79 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
 	return task_pid_nr_ns(p, counter->ns);
 }
 
+static void perf_output_read_one(struct perf_output_handle *handle,
+				 struct perf_counter *counter)
+{
+	u64 read_format = counter->attr.read_format;
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = atomic64_read(&counter->count);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+}
+
+/*
+ * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
+ */
+static void perf_output_read_group(struct perf_output_handle *handle,
+				   struct perf_counter *counter)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	u64 read_format = counter->attr.read_format;
+	u64 values[5];
+	int n = 0;
+
+	values[n++] = 1 + leader->nr_siblings;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		values[n++] = leader->total_time_enabled;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		values[n++] = leader->total_time_running;
+
+	if (leader != counter)
+		leader->pmu->read(leader);
+
+	values[n++] = atomic64_read(&leader->count);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(leader);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		n = 0;
+
+		if (sub != counter)
+			sub->pmu->read(sub);
+
+		values[n++] = atomic64_read(&sub->count);
+		if (read_format & PERF_FORMAT_ID)
+			values[n++] = primary_counter_id(sub);
+
+		perf_output_copy(handle, values, n * sizeof(u64));
+	}
+}
+
+static void perf_output_read(struct perf_output_handle *handle,
+			     struct perf_counter *counter)
+{
+	if (counter->attr.read_format & PERF_FORMAT_GROUP)
+		perf_output_read_group(handle, counter);
+	else
+		perf_output_read_one(handle, counter);
+}
+
 void perf_counter_output(struct perf_counter *counter, int nmi,
 			 struct perf_sample_data *data)
 {
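Note (not part of the patch): perf_output_read() above makes PERF_SAMPLE_READ samples use the same layout as read(). A rough sketch of that body, with illustrative field names; note that the time fields written by perf_output_read_group() come from the leader and, unlike read(), do not include child totals:

/*
 * PERF_SAMPLE_READ body, as emitted by perf_output_read():
 *
 * Without PERF_FORMAT_GROUP:
 *	u64 value;
 *	{ u64 time_enabled; }	if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	{ u64 time_running; }	if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	{ u64 id; }		if PERF_FORMAT_ID
 *
 * With PERF_FORMAT_GROUP:
 *	u64 nr;
 *	{ u64 time_enabled; }	if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	{ u64 time_running; }	if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	{ u64 value;
 *	  { u64 id; }		if PERF_FORMAT_ID
 *	} cntr[nr];
 */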
@@ -2642,12 +2816,7 @@ void perf_counter_output(struct perf_counter *counter, int nmi,
 	struct {
 		u32 pid, tid;
 	} tid_entry;
-	struct {
-		u64 id;
-		u64 counter;
-	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
-	struct perf_raw_record *raw = NULL;
 	int callchain_size = 0;
 	u64 time;
 	struct {
@@ -2701,10 +2870,8 @@ void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		header.size += sizeof(u64);
 
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		header.size += sizeof(u64) +
-			counter->nr_siblings * sizeof(group_entry);
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		header.size += perf_counter_read_size(counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		callchain = perf_callchain(data->regs);
@@ -2717,9 +2884,15 @@ void perf_counter_output(struct perf_counter *counter, int nmi,
 	}
 
 	if (sample_type & PERF_SAMPLE_RAW) {
-		raw = data->raw;
-		if (raw)
-			header.size += raw->size;
+		int size = sizeof(u32);
+
+		if (data->raw)
+			size += data->raw->size;
+		else
+			size += sizeof(u32);
+
+		WARN_ON_ONCE(size & (sizeof(u64)-1));
+		header.size += size;
 	}
 
 	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
@@ -2755,26 +2928,8 @@ void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		perf_output_put(&handle, data->period);
 
-	/*
-	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
-	 */
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		struct perf_counter *leader, *sub;
-		u64 nr = counter->nr_siblings;
-
-		perf_output_put(&handle, nr);
-
-		leader = counter->group_leader;
-		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-			if (sub != counter)
-				sub->pmu->read(sub);
-
-			group_entry.id = primary_counter_id(sub);
-			group_entry.counter = atomic64_read(&sub->count);
-
-			perf_output_put(&handle, group_entry);
-		}
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		perf_output_read(&handle, counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		if (callchain)
@@ -2785,8 +2940,21 @@ void perf_counter_output(struct perf_counter *counter, int nmi,
 		}
 	}
 
-	if ((sample_type & PERF_SAMPLE_RAW) && raw)
-		perf_output_copy(&handle, raw->data, raw->size);
+	if (sample_type & PERF_SAMPLE_RAW) {
+		if (data->raw) {
+			perf_output_put(&handle, data->raw->size);
+			perf_output_copy(&handle, data->raw->data, data->raw->size);
+		} else {
+			struct {
+				u32	size;
+				u32	data;
+			} raw = {
+				.size = sizeof(u32),
+				.data = 0,
+			};
+			perf_output_put(&handle, raw);
+		}
+	}
 
 	perf_output_end(&handle);
 }
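Note (not part of the patch): PERF_SAMPLE_RAW data is now always length-prefixed with a u32, and when no raw payload is attached a u32 of zero padding is written so the record stays u64-aligned; the WARN_ON_ONCE in the sizing hunk above catches payloads that would break that alignment. A small standalone sketch of the same size rule (helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Mirrors how the PERF_SAMPLE_RAW contribution to header.size is computed. */
uint32_t raw_sample_size(const void *raw_data, uint32_t raw_size)
{
	uint32_t size = sizeof(uint32_t);	/* u32 length prefix */

	if (raw_data)
		size += raw_size;		/* payload follows the prefix */
	else
		size += sizeof(uint32_t);	/* u32 of zero padding */

	if (size & (sizeof(uint64_t) - 1))
		fprintf(stderr, "raw payload of %u bytes breaks u64 alignment\n",
			raw_size);

	return size;
}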
@@ -2800,8 +2968,6 @@ struct perf_read_event {
 
 	u32				pid;
 	u32				tid;
-	u64				value;
-	u64				format[3];
 };
 
 static void
@@ -2813,34 +2979,20 @@ perf_counter_read_event(struct perf_counter *counter,
 		.header = {
 			.type = PERF_EVENT_READ,
 			.misc = 0,
-			.size = sizeof(event) - sizeof(event.format),
+			.size = sizeof(event) + perf_counter_read_size(counter),
 		},
 		.pid = perf_counter_pid(counter, task),
 		.tid = perf_counter_tid(counter, task),
-		.value = atomic64_read(&counter->count),
 	};
-	int ret, i = 0;
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_enabled;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_running;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = primary_counter_id(counter);
-	}
+	int ret;
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
 	if (ret)
 		return;
 
-	perf_output_copy(&handle, &event, event.header.size);
+	perf_output_put(&handle, event);
+	perf_output_read(&handle, counter);
+
 	perf_output_end(&handle);
 }
 
@@ -2876,10 +3028,10 @@ static void perf_counter_task_output(struct perf_counter *counter,
 		return;
 
 	task_event->event.pid = perf_counter_pid(counter, task);
-	task_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+	task_event->event.ppid = perf_counter_pid(counter, current);
 
 	task_event->event.tid = perf_counter_tid(counter, task);
-	task_event->event.ptid = perf_counter_tid(counter, task->real_parent);
+	task_event->event.ptid = perf_counter_tid(counter, current);
 
 	perf_output_put(&handle, task_event->event);
 	perf_output_end(&handle);
@@ -3426,40 +3578,32 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
 {
-	struct perf_counter_context *ctx;
-	unsigned long flags;
-	int count;
-
+	/*
+	 * The counter is active, we're good!
+	 */
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		return 1;
 
+	/*
+	 * The counter is off/error, not counting.
+	 */
 	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
 		return 0;
 
 	/*
-	 * If the counter is inactive, it could be just because
-	 * its task is scheduled out, or because it's in a group
-	 * which could not go on the PMU. We want to count in
-	 * the first case but not the second. If the context is
-	 * currently active then an inactive software counter must
-	 * be the second case. If it's not currently active then
-	 * we need to know whether the counter was active when the
-	 * context was last active, which we can determine by
-	 * comparing counter->tstamp_stopped with ctx->time.
-	 *
-	 * We are within an RCU read-side critical section,
-	 * which protects the existence of *ctx.
+	 * The counter is inactive, if the context is active
+	 * we're part of a group that didn't make it on the 'pmu',
+	 * not counting.
 	 */
-	ctx = counter->ctx;
-	spin_lock_irqsave(&ctx->lock, flags);
-	count = 1;
-	/* Re-check state now we have the lock */
-	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
-	    counter->ctx->is_active ||
-	    counter->tstamp_stopped < ctx->time)
-		count = 0;
-	spin_unlock_irqrestore(&ctx->lock, flags);
-	return count;
+	if (counter->ctx->is_active)
+		return 0;
+
+	/*
+	 * We're inactive and the context is too, this means the
+	 * task is scheduled out, we're counting events that happen
+	 * to us, like migration events.
+	 */
+	return 1;
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
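Note (not part of the patch): the rewritten perf_swcounter_is_counting() drops the ctx->lock dance and the tstamp_stopped comparison; informally, the new decision table is:

/*
 *	counter state	ctx->is_active	counting?
 *	-------------	--------------	---------
 *	ACTIVE		-		yes
 *	OFF / ERROR	-		no
 *	INACTIVE	yes		no   (group did not fit on the PMU)
 *	INACTIVE	no		yes  (task scheduled out; still count
 *					      events aimed at it, e.g. migrations)
 */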
@@ -3770,6 +3914,14 @@ static void tp_perf_counter_destroy(struct perf_counter *counter)
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
+	/*
+	 * Raw tracepoint data is a severe data leak, only allow root to
+	 * have these.
+	 */
+	if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
+			!capable(CAP_SYS_ADMIN))
+		return ERR_PTR(-EPERM);
+
 	if (ftrace_profile_enable(counter->attr.config))
 		return NULL;
 
@@ -3903,9 +4055,9 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	atomic64_set(&hwc->period_left, hwc->sample_period);
 
 	/*
-	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
+	 * we currently do not support PERF_FORMAT_GROUP on inherited counters
 	 */
-	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
+	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
 		goto done;
 
 	switch (attr->type) {
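Note (not part of the patch): this last hunk keeps the existing restriction but moves it to the new flag: an inherited counter may not use PERF_FORMAT_GROUP, so an attribute setup like the sketch below is refused at counter creation. Illustrative userspace fragment only, assuming the perf_counter_attr layout of this kernel; field and constant names are the ones in include/linux/perf_counter.h at this point:

#include <linux/perf_counter.h>
#include <string.h>

/* Illustrative only: inherit together with PERF_FORMAT_GROUP is refused. */
void example_attr(struct perf_counter_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID;
	attr->inherit = 1;	/* combined with PERF_FORMAT_GROUP: rejected */
}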