path: root/kernel/perf_counter.c
author     Tejun Heo <tj@kernel.org>	2009-08-14 01:41:02 -0400
committer  Tejun Heo <tj@kernel.org>	2009-08-14 01:45:31 -0400
commit     384be2b18a5f9475eab9ca2bdfa95cc1a04ef59c (patch)
tree       04c93f391a1b65c8bf8d7ba8643c07d26c26590a /kernel/perf_counter.c
parent     a76761b621bcd8336065c4fe3a74f046858bc34c (diff)
parent     142d44b0dd6741a64a7bdbe029110e7c1dcf1d23 (diff)
Merge branch 'percpu-for-linus' into percpu-for-next
Conflicts:
	arch/sparc/kernel/smp_64.c
	arch/x86/kernel/cpu/perf_counter.c
	arch/x86/kernel/setup_percpu.c
	drivers/cpufreq/cpufreq_ondemand.c
	mm/percpu.c

Conflicts in core and arch percpu code are mostly from commit
ed78e1e078dd44249f88b1dd8c76dafb39567161, which substituted many
num_possible_cpus() calls with nr_cpu_ids. As the for-next branch has
moved all the first-chunk allocators into mm/percpu.c, the changes are
moved from arch code to mm/percpu.c.

Signed-off-by: Tejun Heo <tj@kernel.org>
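The num_possible_cpus() -> nr_cpu_ids substitution mentioned above is the usual fix for loops that assume a dense possible-CPU map. Below is a minimal, hypothetical sketch of that kind of change (it is not code from this tree; walk_pcpu_areas() exists only to illustrate the bound change):

	#include <linux/cpumask.h>

	/* Hypothetical helper, shown only to illustrate the loop bound. */
	static void walk_pcpu_areas(void)
	{
		unsigned int cpu;

		/*
		 * Before: for (cpu = 0; cpu < num_possible_cpus(); cpu++)
		 * counts possible CPUs, which is only a valid upper bound
		 * when the possible map is dense and starts at 0.
		 *
		 * After: nr_cpu_ids is the highest possible CPU id plus one,
		 * so it is a correct bound even for a sparse possible map.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (!cpu_possible(cpu))
				continue;
			/* ... touch per-CPU state for 'cpu' here ... */
		}
	}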
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	842
1 file changed, 541 insertions(+), 301 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index fc3b97410bbf..b0bdb36ccfc8 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -42,6 +42,7 @@ static int perf_overcommit __read_mostly = 1;
42static atomic_t nr_counters __read_mostly; 42static atomic_t nr_counters __read_mostly;
43static atomic_t nr_mmap_counters __read_mostly; 43static atomic_t nr_mmap_counters __read_mostly;
44static atomic_t nr_comm_counters __read_mostly; 44static atomic_t nr_comm_counters __read_mostly;
45static atomic_t nr_task_counters __read_mostly;
45 46
46/* 47/*
47 * perf counter paranoia level: 48 * perf counter paranoia level:
@@ -87,6 +88,7 @@ void __weak hw_perf_disable(void) { barrier(); }
87void __weak hw_perf_enable(void) { barrier(); } 88void __weak hw_perf_enable(void) { barrier(); }
88 89
89void __weak hw_perf_counter_setup(int cpu) { barrier(); } 90void __weak hw_perf_counter_setup(int cpu) { barrier(); }
91void __weak hw_perf_counter_setup_online(int cpu) { barrier(); }
90 92
91int __weak 93int __weak
92hw_perf_group_sched_in(struct perf_counter *group_leader, 94hw_perf_group_sched_in(struct perf_counter *group_leader,
@@ -146,6 +148,28 @@ static void put_ctx(struct perf_counter_context *ctx)
146 } 148 }
147} 149}
148 150
151static void unclone_ctx(struct perf_counter_context *ctx)
152{
153 if (ctx->parent_ctx) {
154 put_ctx(ctx->parent_ctx);
155 ctx->parent_ctx = NULL;
156 }
157}
158
159/*
160 * If we inherit counters we want to return the parent counter id
161 * to userspace.
162 */
163static u64 primary_counter_id(struct perf_counter *counter)
164{
165 u64 id = counter->id;
166
167 if (counter->parent)
168 id = counter->parent->id;
169
170 return id;
171}
172
149/* 173/*
150 * Get the perf_counter_context for a task and lock it. 174 * Get the perf_counter_context for a task and lock it.
151 * This has to cope with with the fact that until it is locked, 175 * This has to cope with with the fact that until it is locked,
@@ -283,6 +307,10 @@ counter_sched_out(struct perf_counter *counter,
283 return; 307 return;
284 308
285 counter->state = PERF_COUNTER_STATE_INACTIVE; 309 counter->state = PERF_COUNTER_STATE_INACTIVE;
310 if (counter->pending_disable) {
311 counter->pending_disable = 0;
312 counter->state = PERF_COUNTER_STATE_OFF;
313 }
286 counter->tstamp_stopped = ctx->time; 314 counter->tstamp_stopped = ctx->time;
287 counter->pmu->disable(counter); 315 counter->pmu->disable(counter);
288 counter->oncpu = -1; 316 counter->oncpu = -1;
@@ -1081,7 +1109,7 @@ static void perf_counter_sync_stat(struct perf_counter_context *ctx,
1081 __perf_counter_sync_stat(counter, next_counter); 1109 __perf_counter_sync_stat(counter, next_counter);
1082 1110
1083 counter = list_next_entry(counter, event_entry); 1111 counter = list_next_entry(counter, event_entry);
1084 next_counter = list_next_entry(counter, event_entry); 1112 next_counter = list_next_entry(next_counter, event_entry);
1085 } 1113 }
1086} 1114}
1087 1115
@@ -1288,7 +1316,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1288#define MAX_INTERRUPTS (~0ULL) 1316#define MAX_INTERRUPTS (~0ULL)
1289 1317
1290static void perf_log_throttle(struct perf_counter *counter, int enable); 1318static void perf_log_throttle(struct perf_counter *counter, int enable);
1291static void perf_log_period(struct perf_counter *counter, u64 period);
1292 1319
1293static void perf_adjust_period(struct perf_counter *counter, u64 events) 1320static void perf_adjust_period(struct perf_counter *counter, u64 events)
1294{ 1321{
@@ -1307,8 +1334,6 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
1307 if (!sample_period) 1334 if (!sample_period)
1308 sample_period = 1; 1335 sample_period = 1;
1309 1336
1310 perf_log_period(counter, sample_period);
1311
1312 hwc->sample_period = sample_period; 1337 hwc->sample_period = sample_period;
1313} 1338}
1314 1339
@@ -1463,10 +1488,8 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
1463 /* 1488 /*
1464 * Unclone this context if we enabled any counter. 1489 * Unclone this context if we enabled any counter.
1465 */ 1490 */
1466 if (enabled && ctx->parent_ctx) { 1491 if (enabled)
1467 put_ctx(ctx->parent_ctx); 1492 unclone_ctx(ctx);
1468 ctx->parent_ctx = NULL;
1469 }
1470 1493
1471 spin_unlock(&ctx->lock); 1494 spin_unlock(&ctx->lock);
1472 1495
@@ -1526,7 +1549,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
1526 1549
1527static struct perf_counter_context *find_get_context(pid_t pid, int cpu) 1550static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1528{ 1551{
1529 struct perf_counter_context *parent_ctx;
1530 struct perf_counter_context *ctx; 1552 struct perf_counter_context *ctx;
1531 struct perf_cpu_context *cpuctx; 1553 struct perf_cpu_context *cpuctx;
1532 struct task_struct *task; 1554 struct task_struct *task;
@@ -1586,11 +1608,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1586 retry: 1608 retry:
1587 ctx = perf_lock_task_context(task, &flags); 1609 ctx = perf_lock_task_context(task, &flags);
1588 if (ctx) { 1610 if (ctx) {
1589 parent_ctx = ctx->parent_ctx; 1611 unclone_ctx(ctx);
1590 if (parent_ctx) {
1591 put_ctx(parent_ctx);
1592 ctx->parent_ctx = NULL; /* no longer a clone */
1593 }
1594 spin_unlock_irqrestore(&ctx->lock, flags); 1612 spin_unlock_irqrestore(&ctx->lock, flags);
1595 } 1613 }
1596 1614
@@ -1642,6 +1660,8 @@ static void free_counter(struct perf_counter *counter)
1642 atomic_dec(&nr_mmap_counters); 1660 atomic_dec(&nr_mmap_counters);
1643 if (counter->attr.comm) 1661 if (counter->attr.comm)
1644 atomic_dec(&nr_comm_counters); 1662 atomic_dec(&nr_comm_counters);
1663 if (counter->attr.task)
1664 atomic_dec(&nr_task_counters);
1645 } 1665 }
1646 1666
1647 if (counter->destroy) 1667 if (counter->destroy)
@@ -1676,14 +1696,133 @@ static int perf_release(struct inode *inode, struct file *file)
1676 return 0; 1696 return 0;
1677} 1697}
1678 1698
1699static int perf_counter_read_size(struct perf_counter *counter)
1700{
1701 int entry = sizeof(u64); /* value */
1702 int size = 0;
1703 int nr = 1;
1704
1705 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1706 size += sizeof(u64);
1707
1708 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1709 size += sizeof(u64);
1710
1711 if (counter->attr.read_format & PERF_FORMAT_ID)
1712 entry += sizeof(u64);
1713
1714 if (counter->attr.read_format & PERF_FORMAT_GROUP) {
1715 nr += counter->group_leader->nr_siblings;
1716 size += sizeof(u64);
1717 }
1718
1719 size += entry * nr;
1720
1721 return size;
1722}
1723
1724static u64 perf_counter_read_value(struct perf_counter *counter)
1725{
1726 struct perf_counter *child;
1727 u64 total = 0;
1728
1729 total += perf_counter_read(counter);
1730 list_for_each_entry(child, &counter->child_list, child_list)
1731 total += perf_counter_read(child);
1732
1733 return total;
1734}
1735
1736static int perf_counter_read_entry(struct perf_counter *counter,
1737 u64 read_format, char __user *buf)
1738{
1739 int n = 0, count = 0;
1740 u64 values[2];
1741
1742 values[n++] = perf_counter_read_value(counter);
1743 if (read_format & PERF_FORMAT_ID)
1744 values[n++] = primary_counter_id(counter);
1745
1746 count = n * sizeof(u64);
1747
1748 if (copy_to_user(buf, values, count))
1749 return -EFAULT;
1750
1751 return count;
1752}
1753
1754static int perf_counter_read_group(struct perf_counter *counter,
1755 u64 read_format, char __user *buf)
1756{
1757 struct perf_counter *leader = counter->group_leader, *sub;
1758 int n = 0, size = 0, err = -EFAULT;
1759 u64 values[3];
1760
1761 values[n++] = 1 + leader->nr_siblings;
1762 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1763 values[n++] = leader->total_time_enabled +
1764 atomic64_read(&leader->child_total_time_enabled);
1765 }
1766 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1767 values[n++] = leader->total_time_running +
1768 atomic64_read(&leader->child_total_time_running);
1769 }
1770
1771 size = n * sizeof(u64);
1772
1773 if (copy_to_user(buf, values, size))
1774 return -EFAULT;
1775
1776 err = perf_counter_read_entry(leader, read_format, buf + size);
1777 if (err < 0)
1778 return err;
1779
1780 size += err;
1781
1782 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
1783 err = perf_counter_read_entry(counter, read_format,
1784 buf + size);
1785 if (err < 0)
1786 return err;
1787
1788 size += err;
1789 }
1790
1791 return size;
1792}
1793
1794static int perf_counter_read_one(struct perf_counter *counter,
1795 u64 read_format, char __user *buf)
1796{
1797 u64 values[4];
1798 int n = 0;
1799
1800 values[n++] = perf_counter_read_value(counter);
1801 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1802 values[n++] = counter->total_time_enabled +
1803 atomic64_read(&counter->child_total_time_enabled);
1804 }
1805 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1806 values[n++] = counter->total_time_running +
1807 atomic64_read(&counter->child_total_time_running);
1808 }
1809 if (read_format & PERF_FORMAT_ID)
1810 values[n++] = primary_counter_id(counter);
1811
1812 if (copy_to_user(buf, values, n * sizeof(u64)))
1813 return -EFAULT;
1814
1815 return n * sizeof(u64);
1816}
1817
1679/* 1818/*
1680 * Read the performance counter - simple non blocking version for now 1819 * Read the performance counter - simple non blocking version for now
1681 */ 1820 */
1682static ssize_t 1821static ssize_t
1683perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count) 1822perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1684{ 1823{
1685 u64 values[4]; 1824 u64 read_format = counter->attr.read_format;
1686 int n; 1825 int ret;
1687 1826
1688 /* 1827 /*
1689 * Return end-of-file for a read on a counter that is in 1828 * Return end-of-file for a read on a counter that is in
@@ -1693,28 +1832,18 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1693 if (counter->state == PERF_COUNTER_STATE_ERROR) 1832 if (counter->state == PERF_COUNTER_STATE_ERROR)
1694 return 0; 1833 return 0;
1695 1834
1835 if (count < perf_counter_read_size(counter))
1836 return -ENOSPC;
1837
1696 WARN_ON_ONCE(counter->ctx->parent_ctx); 1838 WARN_ON_ONCE(counter->ctx->parent_ctx);
1697 mutex_lock(&counter->child_mutex); 1839 mutex_lock(&counter->child_mutex);
1698 values[0] = perf_counter_read(counter); 1840 if (read_format & PERF_FORMAT_GROUP)
1699 n = 1; 1841 ret = perf_counter_read_group(counter, read_format, buf);
1700 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1842 else
1701 values[n++] = counter->total_time_enabled + 1843 ret = perf_counter_read_one(counter, read_format, buf);
1702 atomic64_read(&counter->child_total_time_enabled);
1703 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1704 values[n++] = counter->total_time_running +
1705 atomic64_read(&counter->child_total_time_running);
1706 if (counter->attr.read_format & PERF_FORMAT_ID)
1707 values[n++] = counter->id;
1708 mutex_unlock(&counter->child_mutex); 1844 mutex_unlock(&counter->child_mutex);
1709 1845
1710 if (count < n * sizeof(u64)) 1846 return ret;
1711 return -EINVAL;
1712 count = n * sizeof(u64);
1713
1714 if (copy_to_user(buf, values, count))
1715 return -EFAULT;
1716
1717 return count;
1718} 1847}
1719 1848
1720static ssize_t 1849static ssize_t
@@ -1811,8 +1940,6 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1811 1940
1812 counter->attr.sample_freq = value; 1941 counter->attr.sample_freq = value;
1813 } else { 1942 } else {
1814 perf_log_period(counter, value);
1815
1816 counter->attr.sample_period = value; 1943 counter->attr.sample_period = value;
1817 counter->hw.sample_period = value; 1944 counter->hw.sample_period = value;
1818 } 1945 }
@@ -2020,7 +2147,7 @@ fail:
2020 2147
2021static void perf_mmap_free_page(unsigned long addr) 2148static void perf_mmap_free_page(unsigned long addr)
2022{ 2149{
2023 struct page *page = virt_to_page(addr); 2150 struct page *page = virt_to_page((void *)addr);
2024 2151
2025 page->mapping = NULL; 2152 page->mapping = NULL;
2026 __free_page(page); 2153 __free_page(page);
@@ -2220,7 +2347,7 @@ static void perf_pending_counter(struct perf_pending_entry *entry)
2220 2347
2221 if (counter->pending_disable) { 2348 if (counter->pending_disable) {
2222 counter->pending_disable = 0; 2349 counter->pending_disable = 0;
2223 perf_counter_disable(counter); 2350 __perf_counter_disable(counter);
2224 } 2351 }
2225 2352
2226 if (counter->pending_wakeup) { 2353 if (counter->pending_wakeup) {
@@ -2605,7 +2732,80 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2605 return task_pid_nr_ns(p, counter->ns); 2732 return task_pid_nr_ns(p, counter->ns);
2606} 2733}
2607 2734
2608static void perf_counter_output(struct perf_counter *counter, int nmi, 2735static void perf_output_read_one(struct perf_output_handle *handle,
2736 struct perf_counter *counter)
2737{
2738 u64 read_format = counter->attr.read_format;
2739 u64 values[4];
2740 int n = 0;
2741
2742 values[n++] = atomic64_read(&counter->count);
2743 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2744 values[n++] = counter->total_time_enabled +
2745 atomic64_read(&counter->child_total_time_enabled);
2746 }
2747 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2748 values[n++] = counter->total_time_running +
2749 atomic64_read(&counter->child_total_time_running);
2750 }
2751 if (read_format & PERF_FORMAT_ID)
2752 values[n++] = primary_counter_id(counter);
2753
2754 perf_output_copy(handle, values, n * sizeof(u64));
2755}
2756
2757/*
2758 * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
2759 */
2760static void perf_output_read_group(struct perf_output_handle *handle,
2761 struct perf_counter *counter)
2762{
2763 struct perf_counter *leader = counter->group_leader, *sub;
2764 u64 read_format = counter->attr.read_format;
2765 u64 values[5];
2766 int n = 0;
2767
2768 values[n++] = 1 + leader->nr_siblings;
2769
2770 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2771 values[n++] = leader->total_time_enabled;
2772
2773 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2774 values[n++] = leader->total_time_running;
2775
2776 if (leader != counter)
2777 leader->pmu->read(leader);
2778
2779 values[n++] = atomic64_read(&leader->count);
2780 if (read_format & PERF_FORMAT_ID)
2781 values[n++] = primary_counter_id(leader);
2782
2783 perf_output_copy(handle, values, n * sizeof(u64));
2784
2785 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2786 n = 0;
2787
2788 if (sub != counter)
2789 sub->pmu->read(sub);
2790
2791 values[n++] = atomic64_read(&sub->count);
2792 if (read_format & PERF_FORMAT_ID)
2793 values[n++] = primary_counter_id(sub);
2794
2795 perf_output_copy(handle, values, n * sizeof(u64));
2796 }
2797}
2798
2799static void perf_output_read(struct perf_output_handle *handle,
2800 struct perf_counter *counter)
2801{
2802 if (counter->attr.read_format & PERF_FORMAT_GROUP)
2803 perf_output_read_group(handle, counter);
2804 else
2805 perf_output_read_one(handle, counter);
2806}
2807
2808void perf_counter_output(struct perf_counter *counter, int nmi,
2609 struct perf_sample_data *data) 2809 struct perf_sample_data *data)
2610{ 2810{
2611 int ret; 2811 int ret;
@@ -2616,10 +2816,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
2616 struct { 2816 struct {
2617 u32 pid, tid; 2817 u32 pid, tid;
2618 } tid_entry; 2818 } tid_entry;
2619 struct {
2620 u64 id;
2621 u64 counter;
2622 } group_entry;
2623 struct perf_callchain_entry *callchain = NULL; 2819 struct perf_callchain_entry *callchain = NULL;
2624 int callchain_size = 0; 2820 int callchain_size = 0;
2625 u64 time; 2821 u64 time;
@@ -2661,19 +2857,21 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
2661 if (sample_type & PERF_SAMPLE_ID) 2857 if (sample_type & PERF_SAMPLE_ID)
2662 header.size += sizeof(u64); 2858 header.size += sizeof(u64);
2663 2859
2860 if (sample_type & PERF_SAMPLE_STREAM_ID)
2861 header.size += sizeof(u64);
2862
2664 if (sample_type & PERF_SAMPLE_CPU) { 2863 if (sample_type & PERF_SAMPLE_CPU) {
2665 header.size += sizeof(cpu_entry); 2864 header.size += sizeof(cpu_entry);
2666 2865
2667 cpu_entry.cpu = raw_smp_processor_id(); 2866 cpu_entry.cpu = raw_smp_processor_id();
2867 cpu_entry.reserved = 0;
2668 } 2868 }
2669 2869
2670 if (sample_type & PERF_SAMPLE_PERIOD) 2870 if (sample_type & PERF_SAMPLE_PERIOD)
2671 header.size += sizeof(u64); 2871 header.size += sizeof(u64);
2672 2872
2673 if (sample_type & PERF_SAMPLE_GROUP) { 2873 if (sample_type & PERF_SAMPLE_READ)
2674 header.size += sizeof(u64) + 2874 header.size += perf_counter_read_size(counter);
2675 counter->nr_siblings * sizeof(group_entry);
2676 }
2677 2875
2678 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 2876 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2679 callchain = perf_callchain(data->regs); 2877 callchain = perf_callchain(data->regs);
@@ -2685,6 +2883,18 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
2685 header.size += sizeof(u64); 2883 header.size += sizeof(u64);
2686 } 2884 }
2687 2885
2886 if (sample_type & PERF_SAMPLE_RAW) {
2887 int size = sizeof(u32);
2888
2889 if (data->raw)
2890 size += data->raw->size;
2891 else
2892 size += sizeof(u32);
2893
2894 WARN_ON_ONCE(size & (sizeof(u64)-1));
2895 header.size += size;
2896 }
2897
2688 ret = perf_output_begin(&handle, counter, header.size, nmi, 1); 2898 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2689 if (ret) 2899 if (ret)
2690 return; 2900 return;
@@ -2703,7 +2913,13 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
2703 if (sample_type & PERF_SAMPLE_ADDR) 2913 if (sample_type & PERF_SAMPLE_ADDR)
2704 perf_output_put(&handle, data->addr); 2914 perf_output_put(&handle, data->addr);
2705 2915
2706 if (sample_type & PERF_SAMPLE_ID) 2916 if (sample_type & PERF_SAMPLE_ID) {
2917 u64 id = primary_counter_id(counter);
2918
2919 perf_output_put(&handle, id);
2920 }
2921
2922 if (sample_type & PERF_SAMPLE_STREAM_ID)
2707 perf_output_put(&handle, counter->id); 2923 perf_output_put(&handle, counter->id);
2708 2924
2709 if (sample_type & PERF_SAMPLE_CPU) 2925 if (sample_type & PERF_SAMPLE_CPU)
@@ -2712,26 +2928,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
2712 if (sample_type & PERF_SAMPLE_PERIOD) 2928 if (sample_type & PERF_SAMPLE_PERIOD)
2713 perf_output_put(&handle, data->period); 2929 perf_output_put(&handle, data->period);
2714 2930
2715 /* 2931 if (sample_type & PERF_SAMPLE_READ)
2716 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult. 2932 perf_output_read(&handle, counter);
2717 */
2718 if (sample_type & PERF_SAMPLE_GROUP) {
2719 struct perf_counter *leader, *sub;
2720 u64 nr = counter->nr_siblings;
2721
2722 perf_output_put(&handle, nr);
2723
2724 leader = counter->group_leader;
2725 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2726 if (sub != counter)
2727 sub->pmu->read(sub);
2728
2729 group_entry.id = sub->id;
2730 group_entry.counter = atomic64_read(&sub->count);
2731
2732 perf_output_put(&handle, group_entry);
2733 }
2734 }
2735 2933
2736 if (sample_type & PERF_SAMPLE_CALLCHAIN) { 2934 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2737 if (callchain) 2935 if (callchain)
@@ -2742,6 +2940,22 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
2742 } 2940 }
2743 } 2941 }
2744 2942
2943 if (sample_type & PERF_SAMPLE_RAW) {
2944 if (data->raw) {
2945 perf_output_put(&handle, data->raw->size);
2946 perf_output_copy(&handle, data->raw->data, data->raw->size);
2947 } else {
2948 struct {
2949 u32 size;
2950 u32 data;
2951 } raw = {
2952 .size = sizeof(u32),
2953 .data = 0,
2954 };
2955 perf_output_put(&handle, raw);
2956 }
2957 }
2958
2745 perf_output_end(&handle); 2959 perf_output_end(&handle);
2746} 2960}
2747 2961
@@ -2754,8 +2968,6 @@ struct perf_read_event {
2754 2968
2755 u32 pid; 2969 u32 pid;
2756 u32 tid; 2970 u32 tid;
2757 u64 value;
2758 u64 format[3];
2759}; 2971};
2760 2972
2761static void 2973static void
@@ -2767,87 +2979,74 @@ perf_counter_read_event(struct perf_counter *counter,
2767 .header = { 2979 .header = {
2768 .type = PERF_EVENT_READ, 2980 .type = PERF_EVENT_READ,
2769 .misc = 0, 2981 .misc = 0,
2770 .size = sizeof(event) - sizeof(event.format), 2982 .size = sizeof(event) + perf_counter_read_size(counter),
2771 }, 2983 },
2772 .pid = perf_counter_pid(counter, task), 2984 .pid = perf_counter_pid(counter, task),
2773 .tid = perf_counter_tid(counter, task), 2985 .tid = perf_counter_tid(counter, task),
2774 .value = atomic64_read(&counter->count),
2775 }; 2986 };
2776 int ret, i = 0; 2987 int ret;
2777
2778 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
2779 event.header.size += sizeof(u64);
2780 event.format[i++] = counter->total_time_enabled;
2781 }
2782
2783 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
2784 event.header.size += sizeof(u64);
2785 event.format[i++] = counter->total_time_running;
2786 }
2787
2788 if (counter->attr.read_format & PERF_FORMAT_ID) {
2789 u64 id;
2790
2791 event.header.size += sizeof(u64);
2792 if (counter->parent)
2793 id = counter->parent->id;
2794 else
2795 id = counter->id;
2796
2797 event.format[i++] = id;
2798 }
2799 2988
2800 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); 2989 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
2801 if (ret) 2990 if (ret)
2802 return; 2991 return;
2803 2992
2804 perf_output_copy(&handle, &event, event.header.size); 2993 perf_output_put(&handle, event);
2994 perf_output_read(&handle, counter);
2995
2805 perf_output_end(&handle); 2996 perf_output_end(&handle);
2806} 2997}
2807 2998
2808/* 2999/*
2809 * fork tracking 3000 * task tracking -- fork/exit
3001 *
3002 * enabled by: attr.comm | attr.mmap | attr.task
2810 */ 3003 */
2811 3004
2812struct perf_fork_event { 3005struct perf_task_event {
2813 struct task_struct *task; 3006 struct task_struct *task;
3007 struct perf_counter_context *task_ctx;
2814 3008
2815 struct { 3009 struct {
2816 struct perf_event_header header; 3010 struct perf_event_header header;
2817 3011
2818 u32 pid; 3012 u32 pid;
2819 u32 ppid; 3013 u32 ppid;
3014 u32 tid;
3015 u32 ptid;
2820 } event; 3016 } event;
2821}; 3017};
2822 3018
2823static void perf_counter_fork_output(struct perf_counter *counter, 3019static void perf_counter_task_output(struct perf_counter *counter,
2824 struct perf_fork_event *fork_event) 3020 struct perf_task_event *task_event)
2825{ 3021{
2826 struct perf_output_handle handle; 3022 struct perf_output_handle handle;
2827 int size = fork_event->event.header.size; 3023 int size = task_event->event.header.size;
2828 struct task_struct *task = fork_event->task; 3024 struct task_struct *task = task_event->task;
2829 int ret = perf_output_begin(&handle, counter, size, 0, 0); 3025 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2830 3026
2831 if (ret) 3027 if (ret)
2832 return; 3028 return;
2833 3029
2834 fork_event->event.pid = perf_counter_pid(counter, task); 3030 task_event->event.pid = perf_counter_pid(counter, task);
2835 fork_event->event.ppid = perf_counter_pid(counter, task->real_parent); 3031 task_event->event.ppid = perf_counter_pid(counter, current);
3032
3033 task_event->event.tid = perf_counter_tid(counter, task);
3034 task_event->event.ptid = perf_counter_tid(counter, current);
2836 3035
2837 perf_output_put(&handle, fork_event->event); 3036 perf_output_put(&handle, task_event->event);
2838 perf_output_end(&handle); 3037 perf_output_end(&handle);
2839} 3038}
2840 3039
2841static int perf_counter_fork_match(struct perf_counter *counter) 3040static int perf_counter_task_match(struct perf_counter *counter)
2842{ 3041{
2843 if (counter->attr.comm || counter->attr.mmap) 3042 if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
2844 return 1; 3043 return 1;
2845 3044
2846 return 0; 3045 return 0;
2847} 3046}
2848 3047
2849static void perf_counter_fork_ctx(struct perf_counter_context *ctx, 3048static void perf_counter_task_ctx(struct perf_counter_context *ctx,
2850 struct perf_fork_event *fork_event) 3049 struct perf_task_event *task_event)
2851{ 3050{
2852 struct perf_counter *counter; 3051 struct perf_counter *counter;
2853 3052
@@ -2856,51 +3055,62 @@ static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
2856 3055
2857 rcu_read_lock(); 3056 rcu_read_lock();
2858 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 3057 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2859 if (perf_counter_fork_match(counter)) 3058 if (perf_counter_task_match(counter))
2860 perf_counter_fork_output(counter, fork_event); 3059 perf_counter_task_output(counter, task_event);
2861 } 3060 }
2862 rcu_read_unlock(); 3061 rcu_read_unlock();
2863} 3062}
2864 3063
2865static void perf_counter_fork_event(struct perf_fork_event *fork_event) 3064static void perf_counter_task_event(struct perf_task_event *task_event)
2866{ 3065{
2867 struct perf_cpu_context *cpuctx; 3066 struct perf_cpu_context *cpuctx;
2868 struct perf_counter_context *ctx; 3067 struct perf_counter_context *ctx = task_event->task_ctx;
2869 3068
2870 cpuctx = &get_cpu_var(perf_cpu_context); 3069 cpuctx = &get_cpu_var(perf_cpu_context);
2871 perf_counter_fork_ctx(&cpuctx->ctx, fork_event); 3070 perf_counter_task_ctx(&cpuctx->ctx, task_event);
2872 put_cpu_var(perf_cpu_context); 3071 put_cpu_var(perf_cpu_context);
2873 3072
2874 rcu_read_lock(); 3073 rcu_read_lock();
2875 /* 3074 if (!ctx)
2876 * doesn't really matter which of the child contexts the 3075 ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
2877 * events ends up in.
2878 */
2879 ctx = rcu_dereference(current->perf_counter_ctxp);
2880 if (ctx) 3076 if (ctx)
2881 perf_counter_fork_ctx(ctx, fork_event); 3077 perf_counter_task_ctx(ctx, task_event);
2882 rcu_read_unlock(); 3078 rcu_read_unlock();
2883} 3079}
2884 3080
2885void perf_counter_fork(struct task_struct *task) 3081static void perf_counter_task(struct task_struct *task,
3082 struct perf_counter_context *task_ctx,
3083 int new)
2886{ 3084{
2887 struct perf_fork_event fork_event; 3085 struct perf_task_event task_event;
2888 3086
2889 if (!atomic_read(&nr_comm_counters) && 3087 if (!atomic_read(&nr_comm_counters) &&
2890 !atomic_read(&nr_mmap_counters)) 3088 !atomic_read(&nr_mmap_counters) &&
3089 !atomic_read(&nr_task_counters))
2891 return; 3090 return;
2892 3091
2893 fork_event = (struct perf_fork_event){ 3092 task_event = (struct perf_task_event){
2894 .task = task, 3093 .task = task,
2895 .event = { 3094 .task_ctx = task_ctx,
3095 .event = {
2896 .header = { 3096 .header = {
2897 .type = PERF_EVENT_FORK, 3097 .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
2898 .size = sizeof(fork_event.event), 3098 .misc = 0,
3099 .size = sizeof(task_event.event),
2899 }, 3100 },
3101 /* .pid */
3102 /* .ppid */
3103 /* .tid */
3104 /* .ptid */
2900 }, 3105 },
2901 }; 3106 };
2902 3107
2903 perf_counter_fork_event(&fork_event); 3108 perf_counter_task_event(&task_event);
3109}
3110
3111void perf_counter_fork(struct task_struct *task)
3112{
3113 perf_counter_task(task, NULL, 1);
2904} 3114}
2905 3115
2906/* 3116/*
@@ -2968,8 +3178,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2968 struct perf_cpu_context *cpuctx; 3178 struct perf_cpu_context *cpuctx;
2969 struct perf_counter_context *ctx; 3179 struct perf_counter_context *ctx;
2970 unsigned int size; 3180 unsigned int size;
2971 char *comm = comm_event->task->comm; 3181 char comm[TASK_COMM_LEN];
2972 3182
3183 memset(comm, 0, sizeof(comm));
3184 strncpy(comm, comm_event->task->comm, sizeof(comm));
2973 size = ALIGN(strlen(comm)+1, sizeof(u64)); 3185 size = ALIGN(strlen(comm)+1, sizeof(u64));
2974 3186
2975 comm_event->comm = comm; 3187 comm_event->comm = comm;
@@ -3004,8 +3216,16 @@ void perf_counter_comm(struct task_struct *task)
3004 3216
3005 comm_event = (struct perf_comm_event){ 3217 comm_event = (struct perf_comm_event){
3006 .task = task, 3218 .task = task,
3219 /* .comm */
3220 /* .comm_size */
3007 .event = { 3221 .event = {
3008 .header = { .type = PERF_EVENT_COMM, }, 3222 .header = {
3223 .type = PERF_EVENT_COMM,
3224 .misc = 0,
3225 /* .size */
3226 },
3227 /* .pid */
3228 /* .tid */
3009 }, 3229 },
3010 }; 3230 };
3011 3231
@@ -3088,8 +3308,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
3088 char *buf = NULL; 3308 char *buf = NULL;
3089 const char *name; 3309 const char *name;
3090 3310
3311 memset(tmp, 0, sizeof(tmp));
3312
3091 if (file) { 3313 if (file) {
3092 buf = kzalloc(PATH_MAX, GFP_KERNEL); 3314 /*
3315 * d_path works from the end of the buffer backwards, so we
3316 * need to add enough zero bytes after the string to handle
3317 * the 64bit alignment we do later.
3318 */
3319 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3093 if (!buf) { 3320 if (!buf) {
3094 name = strncpy(tmp, "//enomem", sizeof(tmp)); 3321 name = strncpy(tmp, "//enomem", sizeof(tmp));
3095 goto got_name; 3322 goto got_name;
@@ -3100,9 +3327,11 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
3100 goto got_name; 3327 goto got_name;
3101 } 3328 }
3102 } else { 3329 } else {
3103 name = arch_vma_name(mmap_event->vma); 3330 if (arch_vma_name(mmap_event->vma)) {
3104 if (name) 3331 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3332 sizeof(tmp));
3105 goto got_name; 3333 goto got_name;
3334 }
3106 3335
3107 if (!vma->vm_mm) { 3336 if (!vma->vm_mm) {
3108 name = strncpy(tmp, "[vdso]", sizeof(tmp)); 3337 name = strncpy(tmp, "[vdso]", sizeof(tmp));
@@ -3147,8 +3376,16 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
3147 3376
3148 mmap_event = (struct perf_mmap_event){ 3377 mmap_event = (struct perf_mmap_event){
3149 .vma = vma, 3378 .vma = vma,
3379 /* .file_name */
3380 /* .file_size */
3150 .event = { 3381 .event = {
3151 .header = { .type = PERF_EVENT_MMAP, }, 3382 .header = {
3383 .type = PERF_EVENT_MMAP,
3384 .misc = 0,
3385 /* .size */
3386 },
3387 /* .pid */
3388 /* .tid */
3152 .start = vma->vm_start, 3389 .start = vma->vm_start,
3153 .len = vma->vm_end - vma->vm_start, 3390 .len = vma->vm_end - vma->vm_start,
3154 .pgoff = vma->vm_pgoff, 3391 .pgoff = vma->vm_pgoff,
@@ -3159,49 +3396,6 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
3159} 3396}
3160 3397
3161/* 3398/*
3162 * Log sample_period changes so that analyzing tools can re-normalize the
3163 * event flow.
3164 */
3165
3166struct freq_event {
3167 struct perf_event_header header;
3168 u64 time;
3169 u64 id;
3170 u64 period;
3171};
3172
3173static void perf_log_period(struct perf_counter *counter, u64 period)
3174{
3175 struct perf_output_handle handle;
3176 struct freq_event event;
3177 int ret;
3178
3179 if (counter->hw.sample_period == period)
3180 return;
3181
3182 if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
3183 return;
3184
3185 event = (struct freq_event) {
3186 .header = {
3187 .type = PERF_EVENT_PERIOD,
3188 .misc = 0,
3189 .size = sizeof(event),
3190 },
3191 .time = sched_clock(),
3192 .id = counter->id,
3193 .period = period,
3194 };
3195
3196 ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
3197 if (ret)
3198 return;
3199
3200 perf_output_put(&handle, event);
3201 perf_output_end(&handle);
3202}
3203
3204/*
3205 * IRQ throttle logging 3399 * IRQ throttle logging
3206 */ 3400 */
3207 3401
@@ -3214,16 +3408,21 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
3214 struct perf_event_header header; 3408 struct perf_event_header header;
3215 u64 time; 3409 u64 time;
3216 u64 id; 3410 u64 id;
3411 u64 stream_id;
3217 } throttle_event = { 3412 } throttle_event = {
3218 .header = { 3413 .header = {
3219 .type = PERF_EVENT_THROTTLE + 1, 3414 .type = PERF_EVENT_THROTTLE,
3220 .misc = 0, 3415 .misc = 0,
3221 .size = sizeof(throttle_event), 3416 .size = sizeof(throttle_event),
3222 }, 3417 },
3223 .time = sched_clock(), 3418 .time = sched_clock(),
3224 .id = counter->id, 3419 .id = primary_counter_id(counter),
3420 .stream_id = counter->id,
3225 }; 3421 };
3226 3422
3423 if (enable)
3424 throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
3425
3227 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); 3426 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
3228 if (ret) 3427 if (ret)
3229 return; 3428 return;
@@ -3300,125 +3499,111 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
3300 * Generic software counter infrastructure 3499 * Generic software counter infrastructure
3301 */ 3500 */
3302 3501
3303static void perf_swcounter_update(struct perf_counter *counter) 3502/*
3503 * We directly increment counter->count and keep a second value in
3504 * counter->hw.period_left to count intervals. This period counter
3505 * is kept in the range [-sample_period, 0] so that we can use the
3506 * sign as trigger.
3507 */
3508
3509static u64 perf_swcounter_set_period(struct perf_counter *counter)
3304{ 3510{
3305 struct hw_perf_counter *hwc = &counter->hw; 3511 struct hw_perf_counter *hwc = &counter->hw;
3306 u64 prev, now; 3512 u64 period = hwc->last_period;
3307 s64 delta; 3513 u64 nr, offset;
3514 s64 old, val;
3515
3516 hwc->last_period = hwc->sample_period;
3308 3517
3309again: 3518again:
3310 prev = atomic64_read(&hwc->prev_count); 3519 old = val = atomic64_read(&hwc->period_left);
3311 now = atomic64_read(&hwc->count); 3520 if (val < 0)
3312 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev) 3521 return 0;
3313 goto again;
3314 3522
3315 delta = now - prev; 3523 nr = div64_u64(period + val, period);
3524 offset = nr * period;
3525 val -= offset;
3526 if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
3527 goto again;
3316 3528
3317 atomic64_add(delta, &counter->count); 3529 return nr;
3318 atomic64_sub(delta, &hwc->period_left);
3319} 3530}
3320 3531
3321static void perf_swcounter_set_period(struct perf_counter *counter) 3532static void perf_swcounter_overflow(struct perf_counter *counter,
3533 int nmi, struct perf_sample_data *data)
3322{ 3534{
3323 struct hw_perf_counter *hwc = &counter->hw; 3535 struct hw_perf_counter *hwc = &counter->hw;
3324 s64 left = atomic64_read(&hwc->period_left); 3536 u64 overflow;
3325 s64 period = hwc->sample_period;
3326 3537
3327 if (unlikely(left <= -period)) { 3538 data->period = counter->hw.last_period;
3328 left = period; 3539 overflow = perf_swcounter_set_period(counter);
3329 atomic64_set(&hwc->period_left, left);
3330 hwc->last_period = period;
3331 }
3332 3540
3333 if (unlikely(left <= 0)) { 3541 if (hwc->interrupts == MAX_INTERRUPTS)
3334 left += period; 3542 return;
3335 atomic64_add(period, &hwc->period_left);
3336 hwc->last_period = period;
3337 }
3338 3543
3339 atomic64_set(&hwc->prev_count, -left); 3544 for (; overflow; overflow--) {
3340 atomic64_set(&hwc->count, -left); 3545 if (perf_counter_overflow(counter, nmi, data)) {
3546 /*
3547 * We inhibit the overflow from happening when
3548 * hwc->interrupts == MAX_INTERRUPTS.
3549 */
3550 break;
3551 }
3552 }
3341} 3553}
3342 3554
3343static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) 3555static void perf_swcounter_unthrottle(struct perf_counter *counter)
3344{ 3556{
3345 enum hrtimer_restart ret = HRTIMER_RESTART;
3346 struct perf_sample_data data;
3347 struct perf_counter *counter;
3348 u64 period;
3349
3350 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3351 counter->pmu->read(counter);
3352
3353 data.addr = 0;
3354 data.regs = get_irq_regs();
3355 /* 3557 /*
3356 * In case we exclude kernel IPs or are somehow not in interrupt 3558 * Nothing to do, we already reset hwc->interrupts.
3357 * context, provide the next best thing, the user IP.
3358 */ 3559 */
3359 if ((counter->attr.exclude_kernel || !data.regs) && 3560}
3360 !counter->attr.exclude_user)
3361 data.regs = task_pt_regs(current);
3362 3561
3363 if (data.regs) { 3562static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3364 if (perf_counter_overflow(counter, 0, &data)) 3563 int nmi, struct perf_sample_data *data)
3365 ret = HRTIMER_NORESTART; 3564{
3366 } 3565 struct hw_perf_counter *hwc = &counter->hw;
3367 3566
3368 period = max_t(u64, 10000, counter->hw.sample_period); 3567 atomic64_add(nr, &counter->count);
3369 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3370 3568
3371 return ret; 3569 if (!hwc->sample_period)
3372} 3570 return;
3373 3571
3374static void perf_swcounter_overflow(struct perf_counter *counter, 3572 if (!data->regs)
3375 int nmi, struct perf_sample_data *data) 3573 return;
3376{
3377 data->period = counter->hw.last_period;
3378 3574
3379 perf_swcounter_update(counter); 3575 if (!atomic64_add_negative(nr, &hwc->period_left))
3380 perf_swcounter_set_period(counter); 3576 perf_swcounter_overflow(counter, nmi, data);
3381 if (perf_counter_overflow(counter, nmi, data))
3382 /* soft-disable the counter */
3383 ;
3384} 3577}
3385 3578
3386static int perf_swcounter_is_counting(struct perf_counter *counter) 3579static int perf_swcounter_is_counting(struct perf_counter *counter)
3387{ 3580{
3388 struct perf_counter_context *ctx; 3581 /*
3389 unsigned long flags; 3582 * The counter is active, we're good!
3390 int count; 3583 */
3391
3392 if (counter->state == PERF_COUNTER_STATE_ACTIVE) 3584 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3393 return 1; 3585 return 1;
3394 3586
3587 /*
3588 * The counter is off/error, not counting.
3589 */
3395 if (counter->state != PERF_COUNTER_STATE_INACTIVE) 3590 if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3396 return 0; 3591 return 0;
3397 3592
3398 /* 3593 /*
3399 * If the counter is inactive, it could be just because 3594 * The counter is inactive, if the context is active
3400 * its task is scheduled out, or because it's in a group 3595 * we're part of a group that didn't make it on the 'pmu',
3401 * which could not go on the PMU. We want to count in 3596 * not counting.
3402 * the first case but not the second. If the context is
3403 * currently active then an inactive software counter must
3404 * be the second case. If it's not currently active then
3405 * we need to know whether the counter was active when the
3406 * context was last active, which we can determine by
3407 * comparing counter->tstamp_stopped with ctx->time.
3408 *
3409 * We are within an RCU read-side critical section,
3410 * which protects the existence of *ctx.
3411 */ 3597 */
3412 ctx = counter->ctx; 3598 if (counter->ctx->is_active)
3413 spin_lock_irqsave(&ctx->lock, flags); 3599 return 0;
3414 count = 1; 3600
3415 /* Re-check state now we have the lock */ 3601 /*
3416 if (counter->state < PERF_COUNTER_STATE_INACTIVE || 3602 * We're inactive and the context is too, this means the
3417 counter->ctx->is_active || 3603 * task is scheduled out, we're counting events that happen
3418 counter->tstamp_stopped < ctx->time) 3604 * to us, like migration events.
3419 count = 0; 3605 */
3420 spin_unlock_irqrestore(&ctx->lock, flags); 3606 return 1;
3421 return count;
3422} 3607}
3423 3608
3424static int perf_swcounter_match(struct perf_counter *counter, 3609static int perf_swcounter_match(struct perf_counter *counter,
@@ -3444,15 +3629,6 @@ static int perf_swcounter_match(struct perf_counter *counter,
3444 return 1; 3629 return 1;
3445} 3630}
3446 3631
3447static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3448 int nmi, struct perf_sample_data *data)
3449{
3450 int neg = atomic64_add_negative(nr, &counter->hw.count);
3451
3452 if (counter->hw.sample_period && !neg && data->regs)
3453 perf_swcounter_overflow(counter, nmi, data);
3454}
3455
3456static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, 3632static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
3457 enum perf_type_id type, 3633 enum perf_type_id type,
3458 u32 event, u64 nr, int nmi, 3634 u32 event, u64 nr, int nmi,
@@ -3531,27 +3707,66 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi,
3531 3707
3532static void perf_swcounter_read(struct perf_counter *counter) 3708static void perf_swcounter_read(struct perf_counter *counter)
3533{ 3709{
3534 perf_swcounter_update(counter);
3535} 3710}
3536 3711
3537static int perf_swcounter_enable(struct perf_counter *counter) 3712static int perf_swcounter_enable(struct perf_counter *counter)
3538{ 3713{
3539 perf_swcounter_set_period(counter); 3714 struct hw_perf_counter *hwc = &counter->hw;
3715
3716 if (hwc->sample_period) {
3717 hwc->last_period = hwc->sample_period;
3718 perf_swcounter_set_period(counter);
3719 }
3540 return 0; 3720 return 0;
3541} 3721}
3542 3722
3543static void perf_swcounter_disable(struct perf_counter *counter) 3723static void perf_swcounter_disable(struct perf_counter *counter)
3544{ 3724{
3545 perf_swcounter_update(counter);
3546} 3725}
3547 3726
3548static const struct pmu perf_ops_generic = { 3727static const struct pmu perf_ops_generic = {
3549 .enable = perf_swcounter_enable, 3728 .enable = perf_swcounter_enable,
3550 .disable = perf_swcounter_disable, 3729 .disable = perf_swcounter_disable,
3551 .read = perf_swcounter_read, 3730 .read = perf_swcounter_read,
3731 .unthrottle = perf_swcounter_unthrottle,
3552}; 3732};
3553 3733
3554/* 3734/*
3735 * hrtimer based swcounter callback
3736 */
3737
3738static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3739{
3740 enum hrtimer_restart ret = HRTIMER_RESTART;
3741 struct perf_sample_data data;
3742 struct perf_counter *counter;
3743 u64 period;
3744
3745 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3746 counter->pmu->read(counter);
3747
3748 data.addr = 0;
3749 data.regs = get_irq_regs();
3750 /*
3751 * In case we exclude kernel IPs or are somehow not in interrupt
3752 * context, provide the next best thing, the user IP.
3753 */
3754 if ((counter->attr.exclude_kernel || !data.regs) &&
3755 !counter->attr.exclude_user)
3756 data.regs = task_pt_regs(current);
3757
3758 if (data.regs) {
3759 if (perf_counter_overflow(counter, 0, &data))
3760 ret = HRTIMER_NORESTART;
3761 }
3762
3763 period = max_t(u64, 10000, counter->hw.sample_period);
3764 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3765
3766 return ret;
3767}
3768
3769/*
3555 * Software counter: cpu wall time clock 3770 * Software counter: cpu wall time clock
3556 */ 3771 */
3557 3772
@@ -3668,17 +3883,24 @@ static const struct pmu perf_ops_task_clock = {
3668}; 3883};
3669 3884
3670#ifdef CONFIG_EVENT_PROFILE 3885#ifdef CONFIG_EVENT_PROFILE
3671void perf_tpcounter_event(int event_id) 3886void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
3887 int entry_size)
3672{ 3888{
3889 struct perf_raw_record raw = {
3890 .size = entry_size,
3891 .data = record,
3892 };
3893
3673 struct perf_sample_data data = { 3894 struct perf_sample_data data = {
3674 .regs = get_irq_regs(); 3895 .regs = get_irq_regs(),
3675 .addr = 0, 3896 .addr = addr,
3897 .raw = &raw,
3676 }; 3898 };
3677 3899
3678 if (!data.regs) 3900 if (!data.regs)
3679 data.regs = task_pt_regs(current); 3901 data.regs = task_pt_regs(current);
3680 3902
3681 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data); 3903 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
3682} 3904}
3683EXPORT_SYMBOL_GPL(perf_tpcounter_event); 3905EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3684 3906
@@ -3687,16 +3909,20 @@ extern void ftrace_profile_disable(int);
3687 3909
3688static void tp_perf_counter_destroy(struct perf_counter *counter) 3910static void tp_perf_counter_destroy(struct perf_counter *counter)
3689{ 3911{
3690 ftrace_profile_disable(perf_event_id(&counter->attr)); 3912 ftrace_profile_disable(counter->attr.config);
3691} 3913}
3692 3914
3693static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) 3915static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3694{ 3916{
3695 int event_id = perf_event_id(&counter->attr); 3917 /*
3696 int ret; 3918 * Raw tracepoint data is a severe data leak, only allow root to
3919 * have these.
3920 */
3921 if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
3922 !capable(CAP_SYS_ADMIN))
3923 return ERR_PTR(-EPERM);
3697 3924
3698 ret = ftrace_profile_enable(event_id); 3925 if (ftrace_profile_enable(counter->attr.config))
3699 if (ret)
3700 return NULL; 3926 return NULL;
3701 3927
3702 counter->destroy = tp_perf_counter_destroy; 3928 counter->destroy = tp_perf_counter_destroy;
@@ -3829,9 +4055,9 @@ perf_counter_alloc(struct perf_counter_attr *attr,
3829 atomic64_set(&hwc->period_left, hwc->sample_period); 4055 atomic64_set(&hwc->period_left, hwc->sample_period);
3830 4056
3831 /* 4057 /*
3832 * we currently do not support PERF_SAMPLE_GROUP on inherited counters 4058 * we currently do not support PERF_FORMAT_GROUP on inherited counters
3833 */ 4059 */
3834 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP)) 4060 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
3835 goto done; 4061 goto done;
3836 4062
3837 switch (attr->type) { 4063 switch (attr->type) {
@@ -3874,6 +4100,8 @@ done:
3874 atomic_inc(&nr_mmap_counters); 4100 atomic_inc(&nr_mmap_counters);
3875 if (counter->attr.comm) 4101 if (counter->attr.comm)
3876 atomic_inc(&nr_comm_counters); 4102 atomic_inc(&nr_comm_counters);
4103 if (counter->attr.task)
4104 atomic_inc(&nr_task_counters);
3877 } 4105 }
3878 4106
3879 return counter; 4107 return counter;
@@ -4235,8 +4463,10 @@ void perf_counter_exit_task(struct task_struct *child)
4235 struct perf_counter_context *child_ctx; 4463 struct perf_counter_context *child_ctx;
4236 unsigned long flags; 4464 unsigned long flags;
4237 4465
4238 if (likely(!child->perf_counter_ctxp)) 4466 if (likely(!child->perf_counter_ctxp)) {
4467 perf_counter_task(child, NULL, 0);
4239 return; 4468 return;
4469 }
4240 4470
4241 local_irq_save(flags); 4471 local_irq_save(flags);
4242 /* 4472 /*
@@ -4255,17 +4485,20 @@ void perf_counter_exit_task(struct task_struct *child)
4255 */ 4485 */
4256 spin_lock(&child_ctx->lock); 4486 spin_lock(&child_ctx->lock);
4257 child->perf_counter_ctxp = NULL; 4487 child->perf_counter_ctxp = NULL;
4258 if (child_ctx->parent_ctx) { 4488 /*
4259 /* 4489 * If this context is a clone; unclone it so it can't get
4260 * This context is a clone; unclone it so it can't get 4490 * swapped to another process while we're removing all
4261 * swapped to another process while we're removing all 4491 * the counters from it.
4262 * the counters from it. 4492 */
4263 */ 4493 unclone_ctx(child_ctx);
4264 put_ctx(child_ctx->parent_ctx); 4494 spin_unlock_irqrestore(&child_ctx->lock, flags);
4265 child_ctx->parent_ctx = NULL; 4495
4266 } 4496 /*
4267 spin_unlock(&child_ctx->lock); 4497 * Report the task dead after unscheduling the counters so that we
4268 local_irq_restore(flags); 4498 * won't get any samples after PERF_EVENT_EXIT. We can however still
4499 * get a few PERF_EVENT_READ events.
4500 */
4501 perf_counter_task(child, child_ctx, 0);
4269 4502
4270 /* 4503 /*
4271 * We can recurse on the same lock type through: 4504 * We can recurse on the same lock type through:
@@ -4486,6 +4719,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4486 perf_counter_init_cpu(cpu); 4719 perf_counter_init_cpu(cpu);
4487 break; 4720 break;
4488 4721
4722 case CPU_ONLINE:
4723 case CPU_ONLINE_FROZEN:
4724 hw_perf_counter_setup_online(cpu);
4725 break;
4726
4489 case CPU_DOWN_PREPARE: 4727 case CPU_DOWN_PREPARE:
4490 case CPU_DOWN_PREPARE_FROZEN: 4728 case CPU_DOWN_PREPARE_FROZEN:
4491 perf_counter_exit_cpu(cpu); 4729 perf_counter_exit_cpu(cpu);
@@ -4510,6 +4748,8 @@ void __init perf_counter_init(void)
4510{ 4748{
4511 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, 4749 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4512 (void *)(long)smp_processor_id()); 4750 (void *)(long)smp_processor_id());
4751 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
4752 (void *)(long)smp_processor_id());
4513 register_cpu_notifier(&perf_cpu_nb); 4753 register_cpu_notifier(&perf_cpu_nb);
4514} 4754}
4515 4755