Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c | 677
1 file changed, 467 insertions(+), 210 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 950931041954..f274e1959885 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -42,6 +42,7 @@ static int perf_overcommit __read_mostly = 1;
 static atomic_t nr_counters __read_mostly;
 static atomic_t nr_mmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
+static atomic_t nr_task_counters __read_mostly;
 
 /*
  * perf counter paranoia level:
@@ -87,6 +88,7 @@ void __weak hw_perf_disable(void) { barrier(); }
 void __weak hw_perf_enable(void)		{ barrier(); }
 
 void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
+void __weak hw_perf_counter_setup_online(int cpu)	{ barrier(); }
 
 int __weak
 hw_perf_group_sched_in(struct perf_counter *group_leader,
@@ -305,6 +307,10 @@ counter_sched_out(struct perf_counter *counter,
 		return;
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
+	if (counter->pending_disable) {
+		counter->pending_disable = 0;
+		counter->state = PERF_COUNTER_STATE_OFF;
+	}
 	counter->tstamp_stopped = ctx->time;
 	counter->pmu->disable(counter);
 	counter->oncpu = -1;
@@ -1103,7 +1109,7 @@ static void perf_counter_sync_stat(struct perf_counter_context *ctx,
 		__perf_counter_sync_stat(counter, next_counter);
 
 		counter = list_next_entry(counter, event_entry);
-		next_counter = list_next_entry(counter, event_entry);
+		next_counter = list_next_entry(next_counter, event_entry);
 	}
 }
 
@@ -1497,10 +1503,21 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
  */
 static void __perf_counter_read(void *info)
 {
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_counter *counter = info;
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	/*
+	 * If this is a task context, we need to check whether it is
+	 * the current task context of this cpu. If not it has been
+	 * scheduled out before the smp call arrived. In that case
+	 * counter->count would have been updated to a recent sample
+	 * when the counter was scheduled out.
+	 */
+	if (ctx->task && cpuctx->task_ctx != ctx)
+		return;
+
 	local_irq_save(flags);
 	if (ctx->is_active)
 		update_context_time(ctx);
@@ -1654,6 +1671,8 @@ static void free_counter(struct perf_counter *counter)
 			atomic_dec(&nr_mmap_counters);
 		if (counter->attr.comm)
 			atomic_dec(&nr_comm_counters);
+		if (counter->attr.task)
+			atomic_dec(&nr_task_counters);
 	}
 
 	if (counter->destroy)
@@ -1688,14 +1707,133 @@ static int perf_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static int perf_counter_read_size(struct perf_counter *counter)
+{
+	int entry = sizeof(u64); /* value */
+	int size = 0;
+	int nr = 1;
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_ID)
+		entry += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_GROUP) {
+		nr += counter->group_leader->nr_siblings;
+		size += sizeof(u64);
+	}
+
+	size += entry * nr;
+
+	return size;
+}
+
+static u64 perf_counter_read_value(struct perf_counter *counter)
+{
+	struct perf_counter *child;
+	u64 total = 0;
+
+	total += perf_counter_read(counter);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		total += perf_counter_read(child);
+
+	return total;
+}
+
+static int perf_counter_read_entry(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	int n = 0, count = 0;
+	u64 values[2];
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	count = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, count))
+		return -EFAULT;
+
+	return count;
+}
+
+static int perf_counter_read_group(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	int n = 0, size = 0, err = -EFAULT;
+	u64 values[3];
+
+	values[n++] = 1 + leader->nr_siblings;
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = leader->total_time_enabled +
+			atomic64_read(&leader->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = leader->total_time_running +
+			atomic64_read(&leader->child_total_time_running);
+	}
+
+	size = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, size))
+		return -EFAULT;
+
+	err = perf_counter_read_entry(leader, read_format, buf + size);
+	if (err < 0)
+		return err;
+
+	size += err;
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		err = perf_counter_read_entry(sub, read_format,
+				buf + size);
+		if (err < 0)
+			return err;
+
+		size += err;
+	}
+
+	return size;
+}
+
+static int perf_counter_read_one(struct perf_counter *counter,
+				 u64 read_format, char __user *buf)
+{
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	if (copy_to_user(buf, values, n * sizeof(u64)))
+		return -EFAULT;
+
+	return n * sizeof(u64);
+}
+
 /*
  * Read the performance counter - simple non blocking version for now
  */
 static ssize_t
 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 {
-	u64 values[4];
-	int n;
+	u64 read_format = counter->attr.read_format;
+	int ret;
 
 	/*
 	 * Return end-of-file for a read on a counter that is in
@@ -1705,28 +1843,18 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	if (counter->state == PERF_COUNTER_STATE_ERROR)
 		return 0;
 
+	if (count < perf_counter_read_size(counter))
+		return -ENOSPC;
+
 	WARN_ON_ONCE(counter->ctx->parent_ctx);
 	mutex_lock(&counter->child_mutex);
-	values[0] = perf_counter_read(counter);
-	n = 1;
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = counter->total_time_enabled +
-			atomic64_read(&counter->child_total_time_enabled);
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = counter->total_time_running +
-			atomic64_read(&counter->child_total_time_running);
-	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = primary_counter_id(counter);
+	if (read_format & PERF_FORMAT_GROUP)
+		ret = perf_counter_read_group(counter, read_format, buf);
+	else
+		ret = perf_counter_read_one(counter, read_format, buf);
 	mutex_unlock(&counter->child_mutex);
 
-	if (count < n * sizeof(u64))
-		return -EINVAL;
-	count = n * sizeof(u64);
-
-	if (copy_to_user(buf, values, count))
-		return -EFAULT;
-
-	return count;
+	return ret;
 }
 
 static ssize_t
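
[Reader's note -- not part of the patch.] With the rewrite above, read() on a counter fd returns -ENOSPC (rather than the old -EINVAL) when the user buffer is smaller than perf_counter_read_size(), and the layout of the returned u64 words is selected entirely by attr.read_format. A rough, hypothetical userspace-side sketch of walking that buffer; the FMT_* macros and the helper are illustrative stand-ins for the corresponding PERF_FORMAT_* bits, not copied from the kernel headers:

#include <stdint.h>
#include <stdio.h>

#define FMT_ENABLED	(1ULL << 0)	/* stand-in for PERF_FORMAT_TOTAL_TIME_ENABLED */
#define FMT_RUNNING	(1ULL << 1)	/* stand-in for PERF_FORMAT_TOTAL_TIME_RUNNING */
#define FMT_ID		(1ULL << 2)	/* stand-in for PERF_FORMAT_ID */
#define FMT_GROUP	(1ULL << 3)	/* stand-in for PERF_FORMAT_GROUP */

/* buf holds the u64 words returned by read(); fmt mirrors attr.read_format */
static void parse_read_buf(const uint64_t *buf, uint64_t fmt)
{
	uint64_t n = 0;

	if (fmt & FMT_GROUP) {
		/* perf_counter_read_group(): nr, [enabled], [running], then
		 * one {value, [id]} entry for the leader and each sibling */
		uint64_t nr      = buf[n++];
		uint64_t enabled = (fmt & FMT_ENABLED) ? buf[n++] : 0;
		uint64_t running = (fmt & FMT_RUNNING) ? buf[n++] : 0;
		uint64_t i;

		printf("group of %llu (enabled %llu, running %llu)\n",
		       (unsigned long long)nr, (unsigned long long)enabled,
		       (unsigned long long)running);

		for (i = 0; i < nr; i++) {
			uint64_t value = buf[n++];
			uint64_t id    = (fmt & FMT_ID) ? buf[n++] : 0;

			printf("  value %llu id %llu\n",
			       (unsigned long long)value,
			       (unsigned long long)id);
		}
	} else {
		/* perf_counter_read_one(): value, [enabled], [running], [id] */
		uint64_t value   = buf[n++];
		uint64_t enabled = (fmt & FMT_ENABLED) ? buf[n++] : 0;
		uint64_t running = (fmt & FMT_RUNNING) ? buf[n++] : 0;
		uint64_t id      = (fmt & FMT_ID) ? buf[n++] : 0;

		printf("value %llu enabled %llu running %llu id %llu\n",
		       (unsigned long long)value, (unsigned long long)enabled,
		       (unsigned long long)running, (unsigned long long)id);
	}
}
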
@@ -1891,6 +2019,10 @@ int perf_counter_task_disable(void)
 	return 0;
 }
 
+#ifndef PERF_COUNTER_INDEX_OFFSET
+# define PERF_COUNTER_INDEX_OFFSET 0
+#endif
+
 static int perf_counter_index(struct perf_counter *counter)
 {
 	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
@@ -2230,7 +2362,7 @@ static void perf_pending_counter(struct perf_pending_entry *entry)
 
 	if (counter->pending_disable) {
 		counter->pending_disable = 0;
-		perf_counter_disable(counter);
+		__perf_counter_disable(counter);
 	}
 
 	if (counter->pending_wakeup) {
@@ -2615,7 +2747,80 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
 	return task_pid_nr_ns(p, counter->ns);
 }
 
-static void perf_counter_output(struct perf_counter *counter, int nmi,
+static void perf_output_read_one(struct perf_output_handle *handle,
+				 struct perf_counter *counter)
+{
+	u64 read_format = counter->attr.read_format;
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = atomic64_read(&counter->count);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+}
+
+/*
+ * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
+ */
+static void perf_output_read_group(struct perf_output_handle *handle,
+				   struct perf_counter *counter)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	u64 read_format = counter->attr.read_format;
+	u64 values[5];
+	int n = 0;
+
+	values[n++] = 1 + leader->nr_siblings;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		values[n++] = leader->total_time_enabled;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		values[n++] = leader->total_time_running;
+
+	if (leader != counter)
+		leader->pmu->read(leader);
+
+	values[n++] = atomic64_read(&leader->count);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(leader);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		n = 0;
+
+		if (sub != counter)
+			sub->pmu->read(sub);
+
+		values[n++] = atomic64_read(&sub->count);
+		if (read_format & PERF_FORMAT_ID)
+			values[n++] = primary_counter_id(sub);
+
+		perf_output_copy(handle, values, n * sizeof(u64));
+	}
+}
+
+static void perf_output_read(struct perf_output_handle *handle,
+			     struct perf_counter *counter)
+{
+	if (counter->attr.read_format & PERF_FORMAT_GROUP)
+		perf_output_read_group(handle, counter);
+	else
+		perf_output_read_one(handle, counter);
+}
+
+void perf_counter_output(struct perf_counter *counter, int nmi,
 				struct perf_sample_data *data)
 {
 	int ret;
@@ -2626,10 +2831,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	struct {
 		u32 pid, tid;
 	} tid_entry;
-	struct {
-		u64 id;
-		u64 counter;
-	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
 	int callchain_size = 0;
 	u64 time;
@@ -2684,10 +2885,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		header.size += sizeof(u64);
 
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		header.size += sizeof(u64) +
-			counter->nr_siblings * sizeof(group_entry);
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		header.size += perf_counter_read_size(counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		callchain = perf_callchain(data->regs);
@@ -2699,6 +2898,18 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		header.size += sizeof(u64);
 	}
 
+	if (sample_type & PERF_SAMPLE_RAW) {
+		int size = sizeof(u32);
+
+		if (data->raw)
+			size += data->raw->size;
+		else
+			size += sizeof(u32);
+
+		WARN_ON_ONCE(size & (sizeof(u64)-1));
+		header.size += size;
+	}
+
 	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
 	if (ret)
 		return;
@@ -2732,26 +2943,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		perf_output_put(&handle, data->period);
 
-	/*
-	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
-	 */
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		struct perf_counter *leader, *sub;
-		u64 nr = counter->nr_siblings;
-
-		perf_output_put(&handle, nr);
-
-		leader = counter->group_leader;
-		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-			if (sub != counter)
-				sub->pmu->read(sub);
-
-			group_entry.id = primary_counter_id(sub);
-			group_entry.counter = atomic64_read(&sub->count);
-
-			perf_output_put(&handle, group_entry);
-		}
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		perf_output_read(&handle, counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		if (callchain)
@@ -2762,6 +2955,22 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		}
 	}
 
+	if (sample_type & PERF_SAMPLE_RAW) {
+		if (data->raw) {
+			perf_output_put(&handle, data->raw->size);
+			perf_output_copy(&handle, data->raw->data, data->raw->size);
+		} else {
+			struct {
+				u32	size;
+				u32	data;
+			} raw = {
+				.size = sizeof(u32),
+				.data = 0,
+			};
+			perf_output_put(&handle, raw);
+		}
+	}
+
 	perf_output_end(&handle);
 }
 
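[Reader's note -- not part of the patch.] Per the two PERF_SAMPLE_RAW hunks above, a raw sample is a u32 byte count followed by that many bytes of opaque record data; the WARN_ON_ONCE() requires the u32 plus payload to stay a multiple of sizeof(u64), and when no raw data is attached a placeholder of size == sizeof(u32) with four zero bytes is written. A hypothetical consumer-side mirror of that layout (not a kernel header):

#include <stdint.h>

struct raw_sample {
	uint32_t      size;	/* number of payload bytes that follow */
	unsigned char data[];	/* 'size' bytes of tracepoint record */
};

/* The producer keeps sizeof(uint32_t) + size a multiple of 8, so the next
 * sample field starts right after the payload. */
static inline const void *raw_sample_end(const struct raw_sample *raw)
{
	return raw->data + raw->size;
}
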
@@ -2774,8 +2983,6 @@ struct perf_read_event {
 
 	u32				pid;
 	u32				tid;
-	u64				value;
-	u64				format[3];
 };
 
 static void
@@ -2787,80 +2994,74 @@ perf_counter_read_event(struct perf_counter *counter,
 		.header = {
 			.type = PERF_EVENT_READ,
 			.misc = 0,
-			.size = sizeof(event) - sizeof(event.format),
+			.size = sizeof(event) + perf_counter_read_size(counter),
 		},
 		.pid = perf_counter_pid(counter, task),
 		.tid = perf_counter_tid(counter, task),
-		.value = atomic64_read(&counter->count),
 	};
-	int ret, i = 0;
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_enabled;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_running;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = primary_counter_id(counter);
-	}
+	int ret;
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
 	if (ret)
 		return;
 
-	perf_output_copy(&handle, &event, event.header.size);
+	perf_output_put(&handle, event);
+	perf_output_read(&handle, counter);
+
 	perf_output_end(&handle);
 }
 
 /*
- * fork tracking
+ * task tracking -- fork/exit
+ *
+ * enabled by: attr.comm | attr.mmap | attr.task
  */
 
-struct perf_fork_event {
+struct perf_task_event {
 	struct task_struct		*task;
+	struct perf_counter_context	*task_ctx;
 
 	struct {
 		struct perf_event_header	header;
 
 		u32				pid;
 		u32				ppid;
+		u32				tid;
+		u32				ptid;
 	} event;
 };
 
-static void perf_counter_fork_output(struct perf_counter *counter,
-				     struct perf_fork_event *fork_event)
+static void perf_counter_task_output(struct perf_counter *counter,
+				     struct perf_task_event *task_event)
 {
 	struct perf_output_handle handle;
-	int size = fork_event->event.header.size;
-	struct task_struct *task = fork_event->task;
+	int size = task_event->event.header.size;
+	struct task_struct *task = task_event->task;
 	int ret = perf_output_begin(&handle, counter, size, 0, 0);
 
 	if (ret)
 		return;
 
-	fork_event->event.pid = perf_counter_pid(counter, task);
-	fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+	task_event->event.pid = perf_counter_pid(counter, task);
+	task_event->event.ppid = perf_counter_pid(counter, current);
 
-	perf_output_put(&handle, fork_event->event);
+	task_event->event.tid = perf_counter_tid(counter, task);
+	task_event->event.ptid = perf_counter_tid(counter, current);
+
+	perf_output_put(&handle, task_event->event);
 	perf_output_end(&handle);
 }
 
-static int perf_counter_fork_match(struct perf_counter *counter)
+static int perf_counter_task_match(struct perf_counter *counter)
 {
-	if (counter->attr.comm || counter->attr.mmap)
+	if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
 		return 1;
 
 	return 0;
 }
 
-static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
-				  struct perf_fork_event *fork_event)
+static void perf_counter_task_ctx(struct perf_counter_context *ctx,
+				  struct perf_task_event *task_event)
 {
 	struct perf_counter *counter;
 
@@ -2869,54 +3070,62 @@ static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_counter_fork_match(counter))
-			perf_counter_fork_output(counter, fork_event);
+		if (perf_counter_task_match(counter))
+			perf_counter_task_output(counter, task_event);
 	}
 	rcu_read_unlock();
 }
 
-static void perf_counter_fork_event(struct perf_fork_event *fork_event)
+static void perf_counter_task_event(struct perf_task_event *task_event)
 {
 	struct perf_cpu_context *cpuctx;
-	struct perf_counter_context *ctx;
+	struct perf_counter_context *ctx = task_event->task_ctx;
 
 	cpuctx = &get_cpu_var(perf_cpu_context);
-	perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
+	perf_counter_task_ctx(&cpuctx->ctx, task_event);
 	put_cpu_var(perf_cpu_context);
 
 	rcu_read_lock();
-	/*
-	 * doesn't really matter which of the child contexts the
-	 * events ends up in.
-	 */
-	ctx = rcu_dereference(current->perf_counter_ctxp);
+	if (!ctx)
+		ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
 	if (ctx)
-		perf_counter_fork_ctx(ctx, fork_event);
+		perf_counter_task_ctx(ctx, task_event);
 	rcu_read_unlock();
 }
 
-void perf_counter_fork(struct task_struct *task)
+static void perf_counter_task(struct task_struct *task,
+			      struct perf_counter_context *task_ctx,
+			      int new)
 {
-	struct perf_fork_event fork_event;
+	struct perf_task_event task_event;
 
 	if (!atomic_read(&nr_comm_counters) &&
-	    !atomic_read(&nr_mmap_counters))
+	    !atomic_read(&nr_mmap_counters) &&
+	    !atomic_read(&nr_task_counters))
 		return;
 
-	fork_event = (struct perf_fork_event){
+	task_event = (struct perf_task_event){
 		.task	= task,
-		.event  = {
+		.task_ctx = task_ctx,
+		.event    = {
 			.header = {
-				.type = PERF_EVENT_FORK,
+				.type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
 				.misc = 0,
-				.size = sizeof(fork_event.event),
+				.size = sizeof(task_event.event),
 			},
 			/* .pid  */
 			/* .ppid */
+			/* .tid  */
+			/* .ptid */
 		},
 	};
 
-	perf_counter_fork_event(&fork_event);
+	perf_counter_task_event(&task_event);
+}
+
+void perf_counter_fork(struct task_struct *task)
+{
+	perf_counter_task(task, NULL, 1);
 }
 
 /*
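[Reader's note -- not part of the patch.] After the two task-tracking hunks above, PERF_EVENT_FORK and PERF_EVENT_EXIT share one record body and are told apart by header.type; ppid/ptid are now taken from current (the task emitting the event) instead of task->real_parent. An illustrative consumer-side mirror of the body that follows the perf_event_header (not a kernel header):

#include <stdint.h>

struct task_event_body {
	uint32_t pid;	/* task the event is about */
	uint32_t ppid;	/* pid of 'current' when the event was written */
	uint32_t tid;
	uint32_t ptid;	/* tid of 'current' when the event was written */
};
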
@@ -3305,125 +3514,111 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
  * Generic software counter infrastructure
  */
 
-static void perf_swcounter_update(struct perf_counter *counter)
+/*
+ * We directly increment counter->count and keep a second value in
+ * counter->hw.period_left to count intervals. This period counter
+ * is kept in the range [-sample_period, 0] so that we can use the
+ * sign as trigger.
+ */
+
+static u64 perf_swcounter_set_period(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
-	u64 prev, now;
-	s64 delta;
+	u64 period = hwc->last_period;
+	u64 nr, offset;
+	s64 old, val;
+
+	hwc->last_period = hwc->sample_period;
 
 again:
-	prev = atomic64_read(&hwc->prev_count);
-	now = atomic64_read(&hwc->count);
-	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
-		goto again;
+	old = val = atomic64_read(&hwc->period_left);
+	if (val < 0)
+		return 0;
 
-	delta = now - prev;
+	nr = div64_u64(period + val, period);
+	offset = nr * period;
+	val -= offset;
+	if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
+		goto again;
 
-	atomic64_add(delta, &counter->count);
-	atomic64_sub(delta, &hwc->period_left);
+	return nr;
 }
 
-static void perf_swcounter_set_period(struct perf_counter *counter)
+static void perf_swcounter_overflow(struct perf_counter *counter,
+				    int nmi, struct perf_sample_data *data)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
-	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = hwc->sample_period;
+	u64 overflow;
 
-	if (unlikely(left <= -period)) {
-		left = period;
-		atomic64_set(&hwc->period_left, left);
-		hwc->last_period = period;
-	}
+	data->period = counter->hw.last_period;
+	overflow = perf_swcounter_set_period(counter);
 
-	if (unlikely(left <= 0)) {
-		left += period;
-		atomic64_add(period, &hwc->period_left);
-		hwc->last_period = period;
-	}
+	if (hwc->interrupts == MAX_INTERRUPTS)
+		return;
 
-	atomic64_set(&hwc->prev_count, -left);
-	atomic64_set(&hwc->count, -left);
+	for (; overflow; overflow--) {
+		if (perf_counter_overflow(counter, nmi, data)) {
+			/*
+			 * We inhibit the overflow from happening when
+			 * hwc->interrupts == MAX_INTERRUPTS.
+			 */
+			break;
+		}
+	}
 }
 
-static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+static void perf_swcounter_unthrottle(struct perf_counter *counter)
 {
-	enum hrtimer_restart ret = HRTIMER_RESTART;
-	struct perf_sample_data data;
-	struct perf_counter *counter;
-	u64 period;
-
-	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
-	counter->pmu->read(counter);
-
-	data.addr = 0;
-	data.regs = get_irq_regs();
 	/*
-	 * In case we exclude kernel IPs or are somehow not in interrupt
-	 * context, provide the next best thing, the user IP.
+	 * Nothing to do, we already reset hwc->interrupts.
 	 */
-	if ((counter->attr.exclude_kernel || !data.regs) &&
-			!counter->attr.exclude_user)
-		data.regs = task_pt_regs(current);
+}
 
-	if (data.regs) {
-		if (perf_counter_overflow(counter, 0, &data))
-			ret = HRTIMER_NORESTART;
-	}
+static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
+			       int nmi, struct perf_sample_data *data)
+{
+	struct hw_perf_counter *hwc = &counter->hw;
 
-	period = max_t(u64, 10000, counter->hw.sample_period);
-	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+	atomic64_add(nr, &counter->count);
 
-	return ret;
-}
+	if (!hwc->sample_period)
+		return;
 
-static void perf_swcounter_overflow(struct perf_counter *counter,
-				    int nmi, struct perf_sample_data *data)
-{
-	data->period = counter->hw.last_period;
+	if (!data->regs)
+		return;
 
-	perf_swcounter_update(counter);
-	perf_swcounter_set_period(counter);
-	if (perf_counter_overflow(counter, nmi, data))
-		/* soft-disable the counter */
-		;
+	if (!atomic64_add_negative(nr, &hwc->period_left))
+		perf_swcounter_overflow(counter, nmi, data);
 }
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
 {
-	struct perf_counter_context *ctx;
-	unsigned long flags;
-	int count;
-
+	/*
+	 * The counter is active, we're good!
+	 */
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		return 1;
 
+	/*
+	 * The counter is off/error, not counting.
+	 */
 	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
 		return 0;
 
 	/*
-	 * If the counter is inactive, it could be just because
-	 * its task is scheduled out, or because it's in a group
-	 * which could not go on the PMU. We want to count in
-	 * the first case but not the second. If the context is
-	 * currently active then an inactive software counter must
-	 * be the second case. If it's not currently active then
-	 * we need to know whether the counter was active when the
-	 * context was last active, which we can determine by
-	 * comparing counter->tstamp_stopped with ctx->time.
-	 *
-	 * We are within an RCU read-side critical section,
-	 * which protects the existence of *ctx.
+	 * The counter is inactive, if the context is active
+	 * we're part of a group that didn't make it on the 'pmu',
+	 * not counting.
 	 */
-	ctx = counter->ctx;
-	spin_lock_irqsave(&ctx->lock, flags);
-	count = 1;
-	/* Re-check state now we have the lock */
-	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
-	    counter->ctx->is_active ||
-	    counter->tstamp_stopped < ctx->time)
-		count = 0;
-	spin_unlock_irqrestore(&ctx->lock, flags);
-	return count;
+	if (counter->ctx->is_active)
+		return 0;
+
+	/*
+	 * We're inactive and the context is too, this means the
+	 * task is scheduled out, we're counting events that happen
+	 * to us, like migration events.
+	 */
+	return 1;
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
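[Reader's note -- not part of the patch.] A worked example of the new period bookkeeping above, assuming sample_period == 100, so period_left is kept in [-100, 0]:

	period_left == -30, an event adds nr = 42:
	  atomic64_add_negative(42, &period_left)  ->  -30 + 42 = 12  (not negative)
	  perf_swcounter_overflow() runs; perf_swcounter_set_period() sees val = 12:
	    nr     = div64_u64(100 + 12, 100) = 1 elapsed period
	    offset = 1 * 100 = 100
	    val    = 12 - 100 = -88, cmpxchg'd back into period_left
	  perf_counter_overflow() is invoked once.

	Had the increment been 250, val would be 220, nr = 3, val would end at -80,
	and the overflow loop would fire three times.
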
@@ -3449,15 +3644,6 @@ static int perf_swcounter_match(struct perf_counter *counter,
 	return 1;
 }
 
-static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
-			       int nmi, struct perf_sample_data *data)
-{
-	int neg = atomic64_add_negative(nr, &counter->hw.count);
-
-	if (counter->hw.sample_period && !neg && data->regs)
-		perf_swcounter_overflow(counter, nmi, data);
-}
-
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 				     enum perf_type_id type,
 				     u32 event, u64 nr, int nmi,
@@ -3536,27 +3722,66 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi,
 
 static void perf_swcounter_read(struct perf_counter *counter)
 {
-	perf_swcounter_update(counter);
 }
 
 static int perf_swcounter_enable(struct perf_counter *counter)
 {
-	perf_swcounter_set_period(counter);
+	struct hw_perf_counter *hwc = &counter->hw;
+
+	if (hwc->sample_period) {
+		hwc->last_period = hwc->sample_period;
+		perf_swcounter_set_period(counter);
+	}
 	return 0;
 }
 
 static void perf_swcounter_disable(struct perf_counter *counter)
 {
-	perf_swcounter_update(counter);
 }
 
 static const struct pmu perf_ops_generic = {
 	.enable		= perf_swcounter_enable,
 	.disable	= perf_swcounter_disable,
 	.read		= perf_swcounter_read,
+	.unthrottle	= perf_swcounter_unthrottle,
 };
 
 /*
+ * hrtimer based swcounter callback
+ */
+
+static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+{
+	enum hrtimer_restart ret = HRTIMER_RESTART;
+	struct perf_sample_data data;
+	struct perf_counter *counter;
+	u64 period;
+
+	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
+	counter->pmu->read(counter);
+
+	data.addr = 0;
+	data.regs = get_irq_regs();
+	/*
+	 * In case we exclude kernel IPs or are somehow not in interrupt
+	 * context, provide the next best thing, the user IP.
+	 */
+	if ((counter->attr.exclude_kernel || !data.regs) &&
+			!counter->attr.exclude_user)
+		data.regs = task_pt_regs(current);
+
+	if (data.regs) {
+		if (perf_counter_overflow(counter, 0, &data))
+			ret = HRTIMER_NORESTART;
+	}
+
+	period = max_t(u64, 10000, counter->hw.sample_period);
+	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+
+	return ret;
+}
+
+/*
  * Software counter: cpu wall time clock
  */
 
@@ -3673,17 +3898,24 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id)
+void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
+			  int entry_size)
 {
+	struct perf_raw_record raw = {
+		.size = entry_size,
+		.data = record,
+	};
+
 	struct perf_sample_data data = {
 		.regs = get_irq_regs(),
-		.addr = 0,
+		.addr = addr,
+		.raw = &raw,
 	};
 
 	if (!data.regs)
 		data.regs = task_pt_regs(current);
 
-	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
+	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 
@@ -3697,6 +3929,14 @@ static void tp_perf_counter_destroy(struct perf_counter *counter)
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
+	/*
+	 * Raw tracepoint data is a severe data leak, only allow root to
+	 * have these.
+	 */
+	if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
+			!capable(CAP_SYS_ADMIN))
+		return ERR_PTR(-EPERM);
+
 	if (ftrace_profile_enable(counter->attr.config))
 		return NULL;
 
@@ -3830,9 +4070,9 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	atomic64_set(&hwc->period_left, hwc->sample_period);
 
 	/*
-	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
+	 * we currently do not support PERF_FORMAT_GROUP on inherited counters
 	 */
-	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
+	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
 		goto done;
 
 	switch (attr->type) {
@@ -3875,6 +4115,8 @@ done:
 			atomic_inc(&nr_mmap_counters);
 		if (counter->attr.comm)
 			atomic_inc(&nr_comm_counters);
+		if (counter->attr.task)
+			atomic_inc(&nr_task_counters);
 	}
 
 	return counter;
@@ -4236,8 +4478,10 @@ void perf_counter_exit_task(struct task_struct *child)
 	struct perf_counter_context *child_ctx;
 	unsigned long flags;
 
-	if (likely(!child->perf_counter_ctxp))
+	if (likely(!child->perf_counter_ctxp)) {
+		perf_counter_task(child, NULL, 0);
 		return;
+	}
 
 	local_irq_save(flags);
 	/*
@@ -4262,8 +4506,14 @@ void perf_counter_exit_task(struct task_struct *child)
 	 * the counters from it.
 	 */
 	unclone_ctx(child_ctx);
-	spin_unlock(&child_ctx->lock);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+	/*
+	 * Report the task dead after unscheduling the counters so that we
+	 * won't get any samples after PERF_EVENT_EXIT. We can however still
+	 * get a few PERF_EVENT_READ events.
+	 */
+	perf_counter_task(child, child_ctx, 0);
 
 	/*
 	 * We can recurse on the same lock type through:
@@ -4484,6 +4734,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 		perf_counter_init_cpu(cpu);
 		break;
 
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		hw_perf_counter_setup_online(cpu);
+		break;
+
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		perf_counter_exit_cpu(cpu);
@@ -4508,6 +4763,8 @@ void __init perf_counter_init(void)
 {
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
+	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
+			(void *)(long)smp_processor_id());
 	register_cpu_notifier(&perf_cpu_nb);
 }
 