Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c  353
1 file changed, 251 insertions(+), 102 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index b0b20a07f394..36f65e2b8b57 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -88,6 +88,7 @@ void __weak hw_perf_disable(void) { barrier(); }
 void __weak hw_perf_enable(void)		{ barrier(); }
 
 void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
+void __weak hw_perf_counter_setup_online(int cpu)	{ barrier(); }
 
 int __weak
 hw_perf_group_sched_in(struct perf_counter *group_leader,
@@ -306,6 +307,10 @@ counter_sched_out(struct perf_counter *counter,
 		return;
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
+	if (counter->pending_disable) {
+		counter->pending_disable = 0;
+		counter->state = PERF_COUNTER_STATE_OFF;
+	}
 	counter->tstamp_stopped = ctx->time;
 	counter->pmu->disable(counter);
 	counter->oncpu = -1;
@@ -1498,10 +1503,21 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
  */
 static void __perf_counter_read(void *info)
 {
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_counter *counter = info;
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	/*
+	 * If this is a task context, we need to check whether it is
+	 * the current task context of this cpu. If not it has been
+	 * scheduled out before the smp call arrived. In that case
+	 * counter->count would have been updated to a recent sample
+	 * when the counter was scheduled out.
+	 */
+	if (ctx->task && cpuctx->task_ctx != ctx)
+		return;
+
 	local_irq_save(flags);
 	if (ctx->is_active)
 		update_context_time(ctx);
@@ -1691,7 +1707,32 @@ static int perf_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static u64 perf_counter_read_tree(struct perf_counter *counter)
+static int perf_counter_read_size(struct perf_counter *counter)
+{
+	int entry = sizeof(u64); /* value */
+	int size = 0;
+	int nr = 1;
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_ID)
+		entry += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_GROUP) {
+		nr += counter->group_leader->nr_siblings;
+		size += sizeof(u64);
+	}
+
+	size += entry * nr;
+
+	return size;
+}
+
+static u64 perf_counter_read_value(struct perf_counter *counter)
 {
 	struct perf_counter *child;
 	u64 total = 0;
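Aside: perf_counter_read_size() above is just bookkeeping for the layout the new read code emits: one u64 per enabled time field, one u64 for the group member count when PERF_FORMAT_GROUP is set, and one entry (value plus optional id) per counter. A user-space read() buffer has to be at least this big, since perf_read_hw() further down now rejects short buffers with -ENOSPC. A minimal sketch of the same arithmetic from the consumer side (the PERF_FORMAT_* values mirror include/linux/perf_counter.h of this era; the helper name and main() are illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Mirrors include/linux/perf_counter.h (assumed here for a standalone sketch). */
#define PERF_FORMAT_TOTAL_TIME_ENABLED	(1U << 0)
#define PERF_FORMAT_TOTAL_TIME_RUNNING	(1U << 1)
#define PERF_FORMAT_ID			(1U << 2)
#define PERF_FORMAT_GROUP		(1U << 3)

/* Same arithmetic as perf_counter_read_size(): header u64s plus one
 * entry (value + optional id) per counter in the group. */
static size_t read_size(uint64_t read_format, unsigned int nr_siblings)
{
	size_t entry = sizeof(uint64_t);	/* value */
	size_t size = 0;
	unsigned int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(uint64_t);	/* the leading 'nr' field */
	}
	return size + entry * nr;
}

int main(void)
{
	/* Leader with 2 siblings, both time fields and ids: 8 + 8 + 8 + 3*16 = 72 bytes. */
	printf("%zu\n", read_size(PERF_FORMAT_TOTAL_TIME_ENABLED |
				  PERF_FORMAT_TOTAL_TIME_RUNNING |
				  PERF_FORMAT_ID | PERF_FORMAT_GROUP, 2));
	return 0;
}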
@@ -1703,14 +1744,96 @@ static u64 perf_counter_read_tree(struct perf_counter *counter)
 	return total;
 }
 
+static int perf_counter_read_entry(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	int n = 0, count = 0;
+	u64 values[2];
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	count = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, count))
+		return -EFAULT;
+
+	return count;
+}
+
+static int perf_counter_read_group(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	int n = 0, size = 0, err = -EFAULT;
+	u64 values[3];
+
+	values[n++] = 1 + leader->nr_siblings;
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = leader->total_time_enabled +
+			atomic64_read(&leader->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = leader->total_time_running +
+			atomic64_read(&leader->child_total_time_running);
+	}
+
+	size = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, size))
+		return -EFAULT;
+
+	err = perf_counter_read_entry(leader, read_format, buf + size);
+	if (err < 0)
+		return err;
+
+	size += err;
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		err = perf_counter_read_entry(counter, read_format,
+				buf + size);
+		if (err < 0)
+			return err;
+
+		size += err;
+	}
+
+	return size;
+}
+
+static int perf_counter_read_one(struct perf_counter *counter,
+				 u64 read_format, char __user *buf)
+{
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	if (copy_to_user(buf, values, n * sizeof(u64)))
+		return -EFAULT;
+
+	return n * sizeof(u64);
+}
+
 /*
  * Read the performance counter - simple non blocking version for now
  */
 static ssize_t
 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 {
-	u64 values[4];
-	int n;
+	u64 read_format = counter->attr.read_format;
+	int ret;
 
 	/*
 	 * Return end-of-file for a read on a counter that is in
@@ -1720,28 +1843,18 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	if (counter->state == PERF_COUNTER_STATE_ERROR)
 		return 0;
 
+	if (count < perf_counter_read_size(counter))
+		return -ENOSPC;
+
 	WARN_ON_ONCE(counter->ctx->parent_ctx);
 	mutex_lock(&counter->child_mutex);
-	values[0] = perf_counter_read_tree(counter);
-	n = 1;
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = counter->total_time_enabled +
-			atomic64_read(&counter->child_total_time_enabled);
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = counter->total_time_running +
-			atomic64_read(&counter->child_total_time_running);
-	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = primary_counter_id(counter);
+	if (read_format & PERF_FORMAT_GROUP)
+		ret = perf_counter_read_group(counter, read_format, buf);
+	else
+		ret = perf_counter_read_one(counter, read_format, buf);
 	mutex_unlock(&counter->child_mutex);
 
-	if (count < n * sizeof(u64))
-		return -EINVAL;
-	count = n * sizeof(u64);
-
-	if (copy_to_user(buf, values, count))
-		return -EFAULT;
-
-	return count;
+	return ret;
 }
 
 static ssize_t
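Putting the pieces together, a read() on a counter whose read_format includes PERF_FORMAT_GROUP now returns a flat array of u64s: nr, then the optional time_enabled/time_running totals, then one { value, id? } entry for the leader followed by one per sibling. A rough user-space sketch of walking such a buffer (the format constants mirror the kernel header; dump_group_read() and the synthetic buffer in main() are made up for illustration, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define PERF_FORMAT_TOTAL_TIME_ENABLED	(1U << 0)
#define PERF_FORMAT_TOTAL_TIME_RUNNING	(1U << 1)
#define PERF_FORMAT_ID			(1U << 2)
#define PERF_FORMAT_GROUP		(1U << 3)

/*
 * Walk a PERF_FORMAT_GROUP read() buffer:
 *   u64 nr;
 *   u64 time_enabled;                  -- if PERF_FORMAT_TOTAL_TIME_ENABLED
 *   u64 time_running;                  -- if PERF_FORMAT_TOTAL_TIME_RUNNING
 *   { u64 value; u64 id; } cntr[nr];   -- id only if PERF_FORMAT_ID
 */
static void dump_group_read(const uint64_t *buf, uint64_t read_format)
{
	uint64_t i, nr = *buf++;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("time_enabled: %llu\n", (unsigned long long)*buf++);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("time_running: %llu\n", (unsigned long long)*buf++);

	for (i = 0; i < nr; i++) {
		uint64_t value = *buf++;
		uint64_t id = (read_format & PERF_FORMAT_ID) ? *buf++ : 0;

		printf("counter %llu: value=%llu id=%llu\n",
		       (unsigned long long)i, (unsigned long long)value,
		       (unsigned long long)id);
	}
}

int main(void)
{
	/* Synthetic buffer: leader + one sibling, both time fields and ids. */
	uint64_t buf[] = { 2, 1000, 900, 12345, 1, 678, 2 };

	dump_group_read(buf, PERF_FORMAT_TOTAL_TIME_ENABLED |
			     PERF_FORMAT_TOTAL_TIME_RUNNING |
			     PERF_FORMAT_ID | PERF_FORMAT_GROUP);
	return 0;
}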
@@ -1906,6 +2019,10 @@ int perf_counter_task_disable(void)
 	return 0;
 }
 
+#ifndef PERF_COUNTER_INDEX_OFFSET
+# define PERF_COUNTER_INDEX_OFFSET 0
+#endif
+
 static int perf_counter_index(struct perf_counter *counter)
 {
 	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
@@ -2245,7 +2362,7 @@ static void perf_pending_counter(struct perf_pending_entry *entry)
 
 	if (counter->pending_disable) {
 		counter->pending_disable = 0;
-		perf_counter_disable(counter);
+		__perf_counter_disable(counter);
 	}
 
 	if (counter->pending_wakeup) {
@@ -2630,7 +2747,80 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
 	return task_pid_nr_ns(p, counter->ns);
 }
 
-static void perf_counter_output(struct perf_counter *counter, int nmi,
+static void perf_output_read_one(struct perf_output_handle *handle,
+				 struct perf_counter *counter)
+{
+	u64 read_format = counter->attr.read_format;
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = atomic64_read(&counter->count);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+}
+
+/*
+ * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
+ */
+static void perf_output_read_group(struct perf_output_handle *handle,
+				   struct perf_counter *counter)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	u64 read_format = counter->attr.read_format;
+	u64 values[5];
+	int n = 0;
+
+	values[n++] = 1 + leader->nr_siblings;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		values[n++] = leader->total_time_enabled;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		values[n++] = leader->total_time_running;
+
+	if (leader != counter)
+		leader->pmu->read(leader);
+
+	values[n++] = atomic64_read(&leader->count);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(leader);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		n = 0;
+
+		if (sub != counter)
+			sub->pmu->read(sub);
+
+		values[n++] = atomic64_read(&sub->count);
+		if (read_format & PERF_FORMAT_ID)
+			values[n++] = primary_counter_id(sub);
+
+		perf_output_copy(handle, values, n * sizeof(u64));
+	}
+}
+
+static void perf_output_read(struct perf_output_handle *handle,
+			     struct perf_counter *counter)
+{
+	if (counter->attr.read_format & PERF_FORMAT_GROUP)
+		perf_output_read_group(handle, counter);
+	else
+		perf_output_read_one(handle, counter);
+}
+
+void perf_counter_output(struct perf_counter *counter, int nmi,
 				struct perf_sample_data *data)
 {
 	int ret;
@@ -2641,10 +2831,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	struct {
 		u32 pid, tid;
 	} tid_entry;
-	struct {
-		u64 id;
-		u64 counter;
-	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
 	int callchain_size = 0;
 	u64 time;
@@ -2699,10 +2885,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		header.size += sizeof(u64);
 
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		header.size += sizeof(u64) +
-			counter->nr_siblings * sizeof(group_entry);
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		header.size += perf_counter_read_size(counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		callchain = perf_callchain(data->regs);
@@ -2759,26 +2943,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		perf_output_put(&handle, data->period);
 
-	/*
-	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
-	 */
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		struct perf_counter *leader, *sub;
-		u64 nr = counter->nr_siblings;
-
-		perf_output_put(&handle, nr);
-
-		leader = counter->group_leader;
-		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-			if (sub != counter)
-				sub->pmu->read(sub);
-
-			group_entry.id = primary_counter_id(sub);
-			group_entry.counter = atomic64_read(&sub->count);
-
-			perf_output_put(&handle, group_entry);
-		}
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		perf_output_read(&handle, counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		if (callchain)
@@ -2817,8 +2983,6 @@ struct perf_read_event {
 
 	u32				pid;
 	u32				tid;
-	u64				value;
-	u64				format[3];
 };
 
 static void
@@ -2830,34 +2994,20 @@ perf_counter_read_event(struct perf_counter *counter,
 		.header = {
 			.type = PERF_EVENT_READ,
 			.misc = 0,
-			.size = sizeof(event) - sizeof(event.format),
+			.size = sizeof(event) + perf_counter_read_size(counter),
 		},
 		.pid = perf_counter_pid(counter, task),
 		.tid = perf_counter_tid(counter, task),
-		.value = atomic64_read(&counter->count),
 	};
-	int ret, i = 0;
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_enabled;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_running;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = primary_counter_id(counter);
-	}
+	int ret;
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
 	if (ret)
 		return;
 
-	perf_output_copy(&handle, &event, event.header.size);
+	perf_output_put(&handle, event);
+	perf_output_read(&handle, counter);
+
 	perf_output_end(&handle);
 }
 
@@ -2893,10 +3043,10 @@ static void perf_counter_task_output(struct perf_counter *counter,
 		return;
 
 	task_event->event.pid = perf_counter_pid(counter, task);
-	task_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+	task_event->event.ppid = perf_counter_pid(counter, current);
 
 	task_event->event.tid = perf_counter_tid(counter, task);
-	task_event->event.ptid = perf_counter_tid(counter, task->real_parent);
+	task_event->event.ptid = perf_counter_tid(counter, current);
 
 	perf_output_put(&handle, task_event->event);
 	perf_output_end(&handle);
@@ -3443,40 +3593,32 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
 {
-	struct perf_counter_context *ctx;
-	unsigned long flags;
-	int count;
-
+	/*
+	 * The counter is active, we're good!
+	 */
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		return 1;
 
+	/*
+	 * The counter is off/error, not counting.
+	 */
 	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
 		return 0;
 
 	/*
-	 * If the counter is inactive, it could be just because
-	 * its task is scheduled out, or because it's in a group
-	 * which could not go on the PMU. We want to count in
-	 * the first case but not the second. If the context is
-	 * currently active then an inactive software counter must
-	 * be the second case. If it's not currently active then
-	 * we need to know whether the counter was active when the
-	 * context was last active, which we can determine by
-	 * comparing counter->tstamp_stopped with ctx->time.
-	 *
-	 * We are within an RCU read-side critical section,
-	 * which protects the existence of *ctx.
+	 * The counter is inactive, if the context is active
+	 * we're part of a group that didn't make it on the 'pmu',
+	 * not counting.
 	 */
-	ctx = counter->ctx;
-	spin_lock_irqsave(&ctx->lock, flags);
-	count = 1;
-	/* Re-check state now we have the lock */
-	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
-	    counter->ctx->is_active ||
-	    counter->tstamp_stopped < ctx->time)
-		count = 0;
-	spin_unlock_irqrestore(&ctx->lock, flags);
-	return count;
+	if (counter->ctx->is_active)
+		return 0;
+
+	/*
+	 * We're inactive and the context is too, this means the
+	 * task is scheduled out, we're counting events that happen
+	 * to us, like migration events.
+	 */
+	return 1;
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
@@ -3928,9 +4070,9 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	atomic64_set(&hwc->period_left, hwc->sample_period);
 
 	/*
-	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
+	 * we currently do not support PERF_FORMAT_GROUP on inherited counters
 	 */
-	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
+	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
 		goto done;
 
 	switch (attr->type) {
@@ -4592,6 +4734,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 		perf_counter_init_cpu(cpu);
 		break;
 
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		hw_perf_counter_setup_online(cpu);
+		break;
+
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		perf_counter_exit_cpu(cpu);
@@ -4616,6 +4763,8 @@ void __init perf_counter_init(void)
 {
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
+	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
+			(void *)(long)smp_processor_id());
 	register_cpu_notifier(&perf_cpu_nb);
 }
 
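Finally, hw_perf_counter_setup_online() is a __weak no-op, so an architecture that wants a late per-CPU setup pass simply provides its own definition; the notifier changes above arrange for it to run on CPU_ONLINE/CPU_ONLINE_FROZEN and once for the boot CPU from perf_counter_init(). A purely hypothetical sketch of such an override (the my_arch naming and the per-CPU flag are invented for illustration, not taken from any real architecture):

/* Hypothetical arch/my_arch/kernel/perf_counter.c */
#include <linux/percpu.h>
#include <linux/perf_counter.h>

static DEFINE_PER_CPU(int, my_arch_pmu_online);

/* A non-weak definition overrides the __weak stub in kernel/perf_counter.c. */
void hw_perf_counter_setup_online(int cpu)
{
	/* Called from the CPU_ONLINE notifier and once for the boot CPU. */
	per_cpu(my_arch_pmu_online, cpu) = 1;
}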