path: root/kernel/perf_event.c
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--  kernel/perf_event.c | 695
1 file changed, 473 insertions(+), 222 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 9d0b5c665883..6b7ddba1dd64 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -28,6 +28,8 @@
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
+#include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -244,6 +246,49 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 	put_ctx(ctx);
 }
 
+static inline u64 perf_clock(void)
+{
+	return cpu_clock(smp_processor_id());
+}
+
+/*
+ * Update the record of the current time in a context.
+ */
+static void update_context_time(struct perf_event_context *ctx)
+{
+	u64 now = perf_clock();
+
+	ctx->time += now - ctx->timestamp;
+	ctx->timestamp = now;
+}
+
+/*
+ * Update the total_time_enabled and total_time_running fields for a event.
+ */
+static void update_event_times(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	u64 run_end;
+
+	if (event->state < PERF_EVENT_STATE_INACTIVE ||
+	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
+		return;
+
+	if (ctx->is_active)
+		run_end = ctx->time;
+	else
+		run_end = event->tstamp_stopped;
+
+	event->total_time_enabled = run_end - event->tstamp_enabled;
+
+	if (event->state == PERF_EVENT_STATE_INACTIVE)
+		run_end = event->tstamp_stopped;
+	else
+		run_end = ctx->time;
+
+	event->total_time_running = run_end - event->tstamp_running;
+}
+
 /*
  * Add a event from the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
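
Note: the enabled/running times maintained by update_event_times() are what let userspace compensate for counter multiplexing: an oversubscribed event only runs for part of the time it is enabled, and readers scale the raw count by enabled/running. A minimal userspace sketch of that scaling (the struct layout follows the documented read_format; scale_count() is illustrative, not part of this patch):

    #include <stdint.h>

    /* read() layout with PERF_FORMAT_TOTAL_TIME_ENABLED |
     * PERF_FORMAT_TOTAL_TIME_RUNNING set. */
    struct read_format {
    	uint64_t value;		/* raw count */
    	uint64_t time_enabled;	/* event->total_time_enabled */
    	uint64_t time_running;	/* event->total_time_running */
    };

    /* Estimate the count as if the event had run the whole enabled window. */
    static uint64_t scale_count(const struct read_format *rf)
    {
    	if (rf->time_running == 0)
    		return 0;
    	return (uint64_t)((double)rf->value * rf->time_enabled /
    			  rf->time_running);
    }
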
@@ -292,6 +337,18 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 	if (event->group_leader != event)
 		event->group_leader->nr_siblings--;
 
+	update_event_times(event);
+
+	/*
+	 * If event was in error state, then keep it
+	 * that way, otherwise bogus counts will be
+	 * returned on read(). The only way to get out
+	 * of error state is by explicit re-enabling
+	 * of the event
+	 */
+	if (event->state > PERF_EVENT_STATE_OFF)
+		event->state = PERF_EVENT_STATE_OFF;
+
 	/*
 	 * If this was a group event with sibling events then
 	 * upgrade the siblings to singleton events by adding them
@@ -445,50 +502,11 @@ retry:
 	 * can remove the event safely, if the call above did not
 	 * succeed.
 	 */
-	if (!list_empty(&event->group_entry)) {
+	if (!list_empty(&event->group_entry))
 		list_del_event(event, ctx);
-	}
 	spin_unlock_irq(&ctx->lock);
 }
 
-static inline u64 perf_clock(void)
-{
-	return cpu_clock(smp_processor_id());
-}
-
-/*
- * Update the record of the current time in a context.
- */
-static void update_context_time(struct perf_event_context *ctx)
-{
-	u64 now = perf_clock();
-
-	ctx->time += now - ctx->timestamp;
-	ctx->timestamp = now;
-}
-
-/*
- * Update the total_time_enabled and total_time_running fields for a event.
- */
-static void update_event_times(struct perf_event *event)
-{
-	struct perf_event_context *ctx = event->ctx;
-	u64 run_end;
-
-	if (event->state < PERF_EVENT_STATE_INACTIVE ||
-	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
-		return;
-
-	event->total_time_enabled = ctx->time - event->tstamp_enabled;
-
-	if (event->state == PERF_EVENT_STATE_INACTIVE)
-		run_end = event->tstamp_stopped;
-	else
-		run_end = ctx->time;
-
-	event->total_time_running = run_end - event->tstamp_running;
-}
-
 /*
  * Update total_time_enabled and total_time_running for all events in a group.
  */
@@ -1031,10 +1049,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 	update_context_time(ctx);
 
 	perf_disable();
-	if (ctx->nr_active)
+	if (ctx->nr_active) {
 		list_for_each_entry(event, &ctx->group_list, group_entry)
 			group_sched_out(event, cpuctx, ctx);
-
+	}
 	perf_enable();
  out:
 	spin_unlock(&ctx->lock);
@@ -1059,8 +1077,6 @@ static int context_equiv(struct perf_event_context *ctx1,
 		&& !ctx1->pin_count && !ctx2->pin_count;
 }
 
-static void __perf_event_read(void *event);
-
 static void __perf_event_sync_stat(struct perf_event *event,
 				   struct perf_event *next_event)
 {
@@ -1078,8 +1094,8 @@ static void __perf_event_sync_stat(struct perf_event *event,
 	 */
 	switch (event->state) {
 	case PERF_EVENT_STATE_ACTIVE:
-		__perf_event_read(event);
-		break;
+		event->pmu->read(event);
+		/* fall-through */
 
 	case PERF_EVENT_STATE_INACTIVE:
 		update_event_times(event);
@@ -1118,6 +1134,8 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
 	if (!ctx->nr_stat)
 		return;
 
+	update_context_time(ctx);
+
 	event = list_first_entry(&ctx->event_list,
 				 struct perf_event, event_entry);
 
@@ -1161,8 +1179,6 @@ void perf_event_task_sched_out(struct task_struct *task,
 	if (likely(!ctx || !cpuctx->task_ctx))
 		return;
 
-	update_context_time(ctx);
-
 	rcu_read_lock();
 	parent = rcu_dereference(ctx->parent_ctx);
 	next_ctx = next->perf_event_ctxp;
@@ -1355,7 +1371,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 	u64 interrupts, freq;
 
 	spin_lock(&ctx->lock);
-	list_for_each_entry(event, &ctx->group_list, group_entry) {
+	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
@@ -1515,7 +1531,6 @@ static void __perf_event_read(void *info)
 	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_event *event = info;
 	struct perf_event_context *ctx = event->ctx;
-	unsigned long flags;
 
 	/*
 	 * If this is a task context, we need to check whether it is
@@ -1527,12 +1542,12 @@ static void __perf_event_read(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	local_irq_save(flags);
-	if (ctx->is_active)
-		update_context_time(ctx);
-	event->pmu->read(event);
+	spin_lock(&ctx->lock);
+	update_context_time(ctx);
 	update_event_times(event);
-	local_irq_restore(flags);
+	spin_unlock(&ctx->lock);
+
+	event->pmu->read(event);
 }
 
 static u64 perf_event_read(struct perf_event *event)
@@ -1545,7 +1560,13 @@ static u64 perf_event_read(struct perf_event *event)
 		smp_call_function_single(event->oncpu,
 					 __perf_event_read, event, 1);
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
+		struct perf_event_context *ctx = event->ctx;
+		unsigned long flags;
+
+		spin_lock_irqsave(&ctx->lock, flags);
+		update_context_time(ctx);
 		update_event_times(event);
+		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	return atomic64_read(&event->count);
@@ -1658,6 +1679,8 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
 		return ERR_PTR(err);
 }
 
+static void perf_event_free_filter(struct perf_event *event);
+
 static void free_event_rcu(struct rcu_head *head)
 {
 	struct perf_event *event;
@@ -1665,6 +1688,7 @@ static void free_event_rcu(struct rcu_head *head)
 	event = container_of(head, struct perf_event, rcu_head);
 	if (event->ns)
 		put_pid_ns(event->ns);
+	perf_event_free_filter(event);
 	kfree(event);
 }
 
@@ -1696,16 +1720,10 @@ static void free_event(struct perf_event *event)
 	call_rcu(&event->rcu_head, free_event_rcu);
 }
 
-/*
- * Called when the last reference to the file is gone.
- */
-static int perf_release(struct inode *inode, struct file *file)
+int perf_event_release_kernel(struct perf_event *event)
 {
-	struct perf_event *event = file->private_data;
 	struct perf_event_context *ctx = event->ctx;
 
-	file->private_data = NULL;
-
 	WARN_ON_ONCE(ctx->parent_ctx);
 	mutex_lock(&ctx->mutex);
 	perf_event_remove_from_context(event);
@@ -1720,6 +1738,19 @@ static int perf_release(struct inode *inode, struct file *file)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(perf_event_release_kernel);
+
+/*
+ * Called when the last reference to the file is gone.
+ */
+static int perf_release(struct inode *inode, struct file *file)
+{
+	struct perf_event *event = file->private_data;
+
+	file->private_data = NULL;
+
+	return perf_event_release_kernel(event);
+}
 
 static int perf_event_read_size(struct perf_event *event)
 {
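
Note: splitting the teardown out of the file-release path gives in-kernel users a create/destroy pair: perf_event_create_kernel_counter() (added further down in this patch) and perf_event_release_kernel(). A rough sketch of that lifecycle, with error handling elided and example attribute values (not code from this patch):

    	struct perf_event_attr attr = {
    		.type	= PERF_TYPE_SOFTWARE,
    		.config	= PERF_COUNT_SW_CPU_CLOCK,	/* pure counter */
    	};
    	struct perf_event *event;

    	/* pid == -1, cpu == 0: a per-CPU counter bound to CPU 0 */
    	event = perf_event_create_kernel_counter(&attr, 0, -1, NULL);
    	if (!IS_ERR(event)) {
    		/* ... read/use the counter ... */
    		perf_event_release_kernel(event);
    	}
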
@@ -1746,91 +1777,94 @@ static int perf_event_read_size(struct perf_event *event)
 	return size;
 }
 
-static u64 perf_event_read_value(struct perf_event *event)
+u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 {
 	struct perf_event *child;
 	u64 total = 0;
 
+	*enabled = 0;
+	*running = 0;
+
+	mutex_lock(&event->child_mutex);
 	total += perf_event_read(event);
-	list_for_each_entry(child, &event->child_list, child_list)
+	*enabled += event->total_time_enabled +
+			atomic64_read(&event->child_total_time_enabled);
+	*running += event->total_time_running +
+			atomic64_read(&event->child_total_time_running);
+
+	list_for_each_entry(child, &event->child_list, child_list) {
 		total += perf_event_read(child);
+		*enabled += child->total_time_enabled;
+		*running += child->total_time_running;
+	}
+	mutex_unlock(&event->child_mutex);
 
 	return total;
 }
-
-static int perf_event_read_entry(struct perf_event *event,
-				 u64 read_format, char __user *buf)
-{
-	int n = 0, count = 0;
-	u64 values[2];
-
-	values[n++] = perf_event_read_value(event);
-	if (read_format & PERF_FORMAT_ID)
-		values[n++] = primary_event_id(event);
-
-	count = n * sizeof(u64);
-
-	if (copy_to_user(buf, values, count))
-		return -EFAULT;
-
-	return count;
-}
+EXPORT_SYMBOL_GPL(perf_event_read_value);
 
 static int perf_event_read_group(struct perf_event *event,
 				   u64 read_format, char __user *buf)
 {
 	struct perf_event *leader = event->group_leader, *sub;
-	int n = 0, size = 0, err = -EFAULT;
-	u64 values[3];
+	int n = 0, size = 0, ret = -EFAULT;
+	struct perf_event_context *ctx = leader->ctx;
+	u64 values[5];
+	u64 count, enabled, running;
+
+	mutex_lock(&ctx->mutex);
+	count = perf_event_read_value(leader, &enabled, &running);
 
 	values[n++] = 1 + leader->nr_siblings;
-	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		values[n++] = leader->total_time_enabled +
-			atomic64_read(&leader->child_total_time_enabled);
-	}
-	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		values[n++] = leader->total_time_running +
-			atomic64_read(&leader->child_total_time_running);
-	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		values[n++] = enabled;
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		values[n++] = running;
+	values[n++] = count;
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_event_id(leader);
 
 	size = n * sizeof(u64);
 
 	if (copy_to_user(buf, values, size))
-		return -EFAULT;
-
-	err = perf_event_read_entry(leader, read_format, buf + size);
-	if (err < 0)
-		return err;
+		goto unlock;
 
-	size += err;
+	ret = size;
 
 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
-		err = perf_event_read_entry(sub, read_format,
-				buf + size);
-		if (err < 0)
-			return err;
+		n = 0;
+
+		values[n++] = perf_event_read_value(sub, &enabled, &running);
+		if (read_format & PERF_FORMAT_ID)
+			values[n++] = primary_event_id(sub);
+
+		size = n * sizeof(u64);
+
+		if (copy_to_user(buf + ret, values, size)) {
+			ret = -EFAULT;
+			goto unlock;
+		}
 
-		size += err;
+		ret += size;
 	}
+unlock:
+	mutex_unlock(&ctx->mutex);
 
-	return size;
+	return ret;
 }
 
 static int perf_event_read_one(struct perf_event *event,
 				 u64 read_format, char __user *buf)
 {
+	u64 enabled, running;
 	u64 values[4];
 	int n = 0;
 
-	values[n++] = perf_event_read_value(event);
-	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		values[n++] = event->total_time_enabled +
-			atomic64_read(&event->child_total_time_enabled);
-	}
-	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		values[n++] = event->total_time_running +
-			atomic64_read(&event->child_total_time_running);
-	}
+	values[n++] = perf_event_read_value(event, &enabled, &running);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		values[n++] = enabled;
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		values[n++] = running;
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(event);
 
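
Note: the reworked readers emit the following record layouts (summarized from the code above; optional fields appear only when the corresponding read_format bit is set):

    /*
     * PERF_FORMAT_GROUP (perf_event_read_group), one record per group:
     *	u64 nr;			-- 1 + leader->nr_siblings
     *	u64 time_enabled;	-- PERF_FORMAT_TOTAL_TIME_ENABLED
     *	u64 time_running;	-- PERF_FORMAT_TOTAL_TIME_RUNNING
     *	u64 value;		-- leader count
     *	u64 id;			-- PERF_FORMAT_ID
     *	{ u64 value; u64 id; }	-- once per sibling
     *
     * otherwise (perf_event_read_one):
     *	u64 value;
     *	u64 time_enabled;	-- PERF_FORMAT_TOTAL_TIME_ENABLED
     *	u64 time_running;	-- PERF_FORMAT_TOTAL_TIME_RUNNING
     *	u64 id;			-- PERF_FORMAT_ID
     */
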
@@ -1861,12 +1895,10 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
 		return -ENOSPC;
 
 	WARN_ON_ONCE(event->ctx->parent_ctx);
-	mutex_lock(&event->child_mutex);
 	if (read_format & PERF_FORMAT_GROUP)
 		ret = perf_event_read_group(event, read_format, buf);
 	else
 		ret = perf_event_read_one(event, read_format, buf);
-	mutex_unlock(&event->child_mutex);
 
 	return ret;
 }
@@ -1974,7 +2006,8 @@ unlock:
 	return ret;
 }
 
-int perf_event_set_output(struct perf_event *event, int output_fd);
+static int perf_event_set_output(struct perf_event *event, int output_fd);
+static int perf_event_set_filter(struct perf_event *event, void __user *arg);
 
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
@@ -2002,6 +2035,9 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	case PERF_EVENT_IOC_SET_OUTPUT:
 		return perf_event_set_output(event, arg);
 
+	case PERF_EVENT_IOC_SET_FILTER:
+		return perf_event_set_filter(event, (void __user *)arg);
+
 	default:
 		return -ENOTTY;
 	}
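
Note: the new ioctl takes a NUL-terminated filter string (the same predicate syntax as ftrace event filters) and applies it to a tracepoint counter. A hypothetical userspace call, assuming perf_fd was opened with attr.type == PERF_TYPE_TRACEPOINT (the predicate shown depends on the tracepoint's fields):

    #include <sys/ioctl.h>
    #include <linux/perf_event.h>

    static int set_filter(int perf_fd)
    {
    	/* e.g. only count sched_wakeup events for one pid */
    	return ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER, "pid == 2465");
    }
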
@@ -2174,6 +2210,7 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
 	perf_mmap_free_page((unsigned long)data->user_page);
 	for (i = 0; i < data->nr_pages; i++)
 		perf_mmap_free_page((unsigned long)data->data_pages[i]);
+	kfree(data);
 }
 
 #else
@@ -2214,6 +2251,7 @@ static void perf_mmap_data_free_work(struct work_struct *work)
 		perf_mmap_unmark_page(base + (i * PAGE_SIZE));
 
 	vfree(base);
+	kfree(data);
 }
 
 static void perf_mmap_data_free(struct perf_mmap_data *data)
@@ -2307,7 +2345,7 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
 	}
 
 	if (!data->watermark)
-		data->watermark = max_t(long, PAGE_SIZE, max_size / 2);
+		data->watermark = max_size / 2;
 
 
 	rcu_assign_pointer(event->data, data);
@@ -2319,7 +2357,6 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
 
 	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
 	perf_mmap_data_free(data);
-	kfree(data);
 }
 
 static void perf_mmap_data_release(struct perf_event *event)
@@ -2666,20 +2703,21 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 static void perf_output_lock(struct perf_output_handle *handle)
 {
 	struct perf_mmap_data *data = handle->data;
-	int cpu;
+	int cur, cpu = get_cpu();
 
 	handle->locked = 0;
 
-	local_irq_save(handle->flags);
-	cpu = smp_processor_id();
-
-	if (in_nmi() && atomic_read(&data->lock) == cpu)
-		return;
+	for (;;) {
+		cur = atomic_cmpxchg(&data->lock, -1, cpu);
+		if (cur == -1) {
+			handle->locked = 1;
+			break;
+		}
+		if (cur == cpu)
+			break;
 
-	while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
 		cpu_relax();
-
-	handle->locked = 1;
+	}
 }
 
 static void perf_output_unlock(struct perf_output_handle *handle)
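
Note: perf_output_lock() is now a small CPU-owner spinlock: the owner is recorded by CPU id, so a nested context (e.g. an NMI) on the owning CPU sees cur == cpu, proceeds without spinning, and leaves handle->locked == 0 so that only the outermost taker unlocks. The same pattern, restated as standalone C11 for illustration (not kernel code):

    #include <stdatomic.h>

    static atomic_int lock_owner = -1;	/* -1 == unlocked */

    /* Returns 1 if this call took the lock (caller must release),
     * 0 if this "cpu" already held it (nested entry). */
    static int output_lock(int cpu)
    {
    	for (;;) {
    		int cur = -1;
    		if (atomic_compare_exchange_strong(&lock_owner, &cur, cpu))
    			return 1;
    		if (cur == cpu)
    			return 0;
    		/* spin: another CPU owns the lock */
    	}
    }

    static void output_unlock(int locked)
    {
    	if (locked)
    		atomic_store(&lock_owner, -1);
    }
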
@@ -2725,7 +2763,7 @@ again:
 	if (atomic_xchg(&data->wakeup, 0))
 		perf_output_wakeup(handle);
 out:
-	local_irq_restore(handle->flags);
+	put_cpu();
 }
 
 void perf_output_copy(struct perf_output_handle *handle,
@@ -3236,15 +3274,10 @@ static void perf_event_task_ctx(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
-		return;
-
-	rcu_read_lock();
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (perf_event_task_match(event))
 			perf_event_task_output(event, task_event);
 	}
-	rcu_read_unlock();
 }
 
 static void perf_event_task_event(struct perf_task_event *task_event)
@@ -3252,11 +3285,11 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx = task_event->task_ctx;
 
+	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_task_ctx(&cpuctx->ctx, task_event);
 	put_cpu_var(perf_cpu_context);
 
-	rcu_read_lock();
 	if (!ctx)
 		ctx = rcu_dereference(task_event->task->perf_event_ctxp);
 	if (ctx)
@@ -3348,15 +3381,10 @@ static void perf_event_comm_ctx(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
-		return;
-
-	rcu_read_lock();
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (perf_event_comm_match(event))
 			perf_event_comm_output(event, comm_event);
 	}
-	rcu_read_unlock();
 }
 
 static void perf_event_comm_event(struct perf_comm_event *comm_event)
@@ -3367,7 +3395,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	char comm[TASK_COMM_LEN];
 
 	memset(comm, 0, sizeof(comm));
-	strncpy(comm, comm_event->task->comm, sizeof(comm));
+	strlcpy(comm, comm_event->task->comm, sizeof(comm));
 	size = ALIGN(strlen(comm)+1, sizeof(u64));
 
 	comm_event->comm = comm;
@@ -3375,11 +3403,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 
 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
 
+	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 	put_cpu_var(perf_cpu_context);
 
-	rcu_read_lock();
 	/*
 	 * doesn't really matter which of the child contexts the
 	 * events ends up in.
@@ -3472,15 +3500,10 @@ static void perf_event_mmap_ctx(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
-		return;
-
-	rcu_read_lock();
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (perf_event_mmap_match(event, mmap_event))
 			perf_event_mmap_output(event, mmap_event);
 	}
-	rcu_read_unlock();
 }
 
 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
@@ -3536,11 +3559,11 @@ got_name:
 
 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
 
+	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
 	put_cpu_var(perf_cpu_context);
 
-	rcu_read_lock();
 	/*
 	 * doesn't really matter which of the child contexts the
 	 * events ends up in.
@@ -3679,7 +3702,11 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
 		perf_event_disable(event);
 	}
 
-	perf_event_output(event, nmi, data, regs);
+	if (event->overflow_handler)
+		event->overflow_handler(event, nmi, data, regs);
+	else
+		perf_event_output(event, nmi, data, regs);
+
 	return ret;
 }
 
@@ -3724,16 +3751,16 @@ again:
 	return nr;
 }
 
-static void perf_swevent_overflow(struct perf_event *event,
+static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
 				  int nmi, struct perf_sample_data *data,
 				  struct pt_regs *regs)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int throttle = 0;
-	u64 overflow;
 
 	data->period = event->hw.last_period;
-	overflow = perf_swevent_set_period(event);
+	if (!overflow)
+		overflow = perf_swevent_set_period(event);
 
 	if (hwc->interrupts == MAX_INTERRUPTS)
 		return;
@@ -3766,14 +3793,19 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
 
 	atomic64_add(nr, &event->count);
 
+	if (!regs)
+		return;
+
 	if (!hwc->sample_period)
 		return;
 
-	if (!regs)
+	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
+		return perf_swevent_overflow(event, 1, nmi, data, regs);
+
+	if (atomic64_add_negative(nr, &hwc->period_left))
 		return;
 
-	if (!atomic64_add_negative(nr, &hwc->period_left))
-		perf_swevent_overflow(event, nmi, data, regs);
+	perf_swevent_overflow(event, 0, nmi, data, regs);
 }
 
 static int perf_swevent_is_counting(struct perf_event *event)
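
Note: perf_swevent_add() now returns early when no regs are available, fast-paths the common period==1 case directly into one overflow, and otherwise accumulates nr into period_left until it goes non-negative. The accounting idea, reduced to plain C for illustration (kernel atomics replaced; assumes sample_period > 0):

    #include <stdint.h>

    struct sw_counter {
    	int64_t  period_left;	/* starts at -sample_period */
    	uint64_t sample_period;
    };

    /* Add nr occurrences; returns how many whole periods elapsed. */
    static uint64_t sw_counter_add(struct sw_counter *c, uint64_t nr)
    {
    	uint64_t elapsed = 0;

    	c->period_left += nr;
    	while (c->period_left >= 0) {	/* cf. perf_swevent_set_period() */
    		c->period_left -= c->sample_period;
    		elapsed++;
    	}
    	return elapsed;
    }
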
@@ -3806,25 +3838,44 @@ static int perf_swevent_is_counting(struct perf_event *event)
 	return 1;
 }
 
+static int perf_tp_event_match(struct perf_event *event,
+				struct perf_sample_data *data);
+
+static int perf_exclude_event(struct perf_event *event,
+			      struct pt_regs *regs)
+{
+	if (regs) {
+		if (event->attr.exclude_user && user_mode(regs))
+			return 1;
+
+		if (event->attr.exclude_kernel && !user_mode(regs))
+			return 1;
+	}
+
+	return 0;
+}
+
 static int perf_swevent_match(struct perf_event *event,
 				enum perf_type_id type,
-				u32 event_id, struct pt_regs *regs)
+				u32 event_id,
+				struct perf_sample_data *data,
+				struct pt_regs *regs)
 {
 	if (!perf_swevent_is_counting(event))
 		return 0;
 
 	if (event->attr.type != type)
 		return 0;
+
 	if (event->attr.config != event_id)
 		return 0;
 
-	if (regs) {
-		if (event->attr.exclude_user && user_mode(regs))
-			return 0;
+	if (perf_exclude_event(event, regs))
+		return 0;
 
-		if (event->attr.exclude_kernel && !user_mode(regs))
-			return 0;
-	}
+	if (event->attr.type == PERF_TYPE_TRACEPOINT &&
+	    !perf_tp_event_match(event, data))
+		return 0;
 
 	return 1;
 }
@@ -3837,49 +3888,59 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
-		return;
-
-	rcu_read_lock();
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
-		if (perf_swevent_match(event, type, event_id, regs))
+		if (perf_swevent_match(event, type, event_id, data, regs))
 			perf_swevent_add(event, nr, nmi, data, regs);
 	}
-	rcu_read_unlock();
 }
 
-static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
+int perf_swevent_get_recursion_context(void)
 {
+	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
+	int rctx;
+
 	if (in_nmi())
-		return &cpuctx->recursion[3];
+		rctx = 3;
+	else if (in_irq())
+		rctx = 2;
+	else if (in_softirq())
+		rctx = 1;
+	else
+		rctx = 0;
 
-	if (in_irq())
-		return &cpuctx->recursion[2];
+	if (cpuctx->recursion[rctx]) {
+		put_cpu_var(perf_cpu_context);
+		return -1;
+	}
 
-	if (in_softirq())
-		return &cpuctx->recursion[1];
+	cpuctx->recursion[rctx]++;
+	barrier();
 
-	return &cpuctx->recursion[0];
+	return rctx;
+}
+EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
+
+void perf_swevent_put_recursion_context(int rctx)
+{
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	barrier();
+	cpuctx->recursion[rctx]--;
+	put_cpu_var(perf_cpu_context);
 }
+EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
 
 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 			     u64 nr, int nmi,
 			     struct perf_sample_data *data,
 			     struct pt_regs *regs)
 {
-	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
-	int *recursion = perf_swevent_recursion_context(cpuctx);
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
 
-	if (*recursion)
-		goto out;
-
-	(*recursion)++;
-	barrier();
-
+	cpuctx = &__get_cpu_var(perf_cpu_context);
+	rcu_read_lock();
 	perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
 				 nr, nmi, data, regs);
-	rcu_read_lock();
 	/*
 	 * doesn't really matter which of the child contexts the
 	 * events ends up in.
@@ -3888,23 +3949,24 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 	if (ctx)
 		perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
 	rcu_read_unlock();
-
-	barrier();
-	(*recursion)--;
-
-out:
-	put_cpu_var(perf_cpu_context);
 }
 
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
 		     struct pt_regs *regs, u64 addr)
 {
-	struct perf_sample_data data = {
-		.addr = addr,
-	};
+	struct perf_sample_data data;
+	int rctx;
 
-	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi,
-			 &data, regs);
+	rctx = perf_swevent_get_recursion_context();
+	if (rctx < 0)
+		return;
+
+	data.addr = addr;
+	data.raw  = NULL;
+
+	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
+
+	perf_swevent_put_recursion_context(rctx);
 }
 
 static void perf_swevent_read(struct perf_event *event)
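
Note: the recursion guard is now an exported pair with one slot per execution context (task, softirq, hardirq, NMI). Any caller delivering software events follows the same shape as __perf_sw_event() above (sketch of the calling convention, not new code from this patch):

    	int rctx;

    	rctx = perf_swevent_get_recursion_context();
    	if (rctx < 0)
    		return;		/* already inside swevent code at this level */

    	/* ... deliver the event, e.g. via do_perf_sw_event() ... */

    	perf_swevent_put_recursion_context(rctx);
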
@@ -3949,6 +4011,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	event->pmu->read(event);
 
 	data.addr = 0;
+	data.period = event->hw.last_period;
 	regs = get_irq_regs();
 	/*
 	 * In case we exclude kernel IPs or are somehow not in interrupt
@@ -3959,8 +4022,9 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 		regs = task_pt_regs(current);
 
 	if (regs) {
-		if (perf_event_overflow(event, 0, &data, regs))
-			ret = HRTIMER_NORESTART;
+		if (!(event->attr.exclude_idle && current->pid == 0))
+			if (perf_event_overflow(event, 0, &data, regs))
+				ret = HRTIMER_NORESTART;
 	}
 
 	period = max_t(u64, 10000, event->hw.sample_period);
@@ -3969,6 +4033,42 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	return ret;
 }
 
+static void perf_swevent_start_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hwc->hrtimer.function = perf_swevent_hrtimer;
+	if (hwc->sample_period) {
+		u64 period;
+
+		if (hwc->remaining) {
+			if (hwc->remaining < 0)
+				period = 10000;
+			else
+				period = hwc->remaining;
+			hwc->remaining = 0;
+		} else {
+			period = max_t(u64, 10000, hwc->sample_period);
+		}
+		__hrtimer_start_range_ns(&hwc->hrtimer,
+				ns_to_ktime(period), 0,
+				HRTIMER_MODE_REL, 0);
+	}
+}
+
+static void perf_swevent_cancel_hrtimer(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (hwc->sample_period) {
+		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+		hwc->remaining = ktime_to_ns(remaining);
+
+		hrtimer_cancel(&hwc->hrtimer);
+	}
+}
+
 /*
 * Software event: cpu wall time clock
 */
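
Note: perf_swevent_cancel_hrtimer() saves the unexpired part of the current period in hwc->remaining, and perf_swevent_start_hrtimer() re-arms with it, so an event that is scheduled out and back in does not restart its sampling period from zero; an overdue (negative) leftover is clamped to the 10 us floor. The period selection, restated as a standalone helper for clarity (illustrative only):

    #include <stdint.h>

    static uint64_t next_timer_delay_ns(int64_t remaining, uint64_t sample_period)
    {
    	if (remaining) {			/* resuming mid-period */
    		if (remaining < 0)		/* already overdue */
    			return 10000;		/* 10 us minimum */
    		return (uint64_t)remaining;
    	}
    	return sample_period > 10000 ? sample_period : 10000;	/* fresh period */
    }
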
@@ -3991,22 +4091,14 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
 	int cpu = raw_smp_processor_id();
 
 	atomic64_set(&hwc->prev_count, cpu_clock(cpu));
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		u64 period = max_t(u64, 10000, hwc->sample_period);
-		__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL, 0);
-	}
+	perf_swevent_start_hrtimer(event);
 
 	return 0;
 }
 
 static void cpu_clock_perf_event_disable(struct perf_event *event)
 {
-	if (event->hw.sample_period)
-		hrtimer_cancel(&event->hw.hrtimer);
+	perf_swevent_cancel_hrtimer(event);
 	cpu_clock_perf_event_update(event);
 }
 
@@ -4043,22 +4135,15 @@ static int task_clock_perf_event_enable(struct perf_event *event)
 	now = event->ctx->time;
 
 	atomic64_set(&hwc->prev_count, now);
-	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	hwc->hrtimer.function = perf_swevent_hrtimer;
-	if (hwc->sample_period) {
-		u64 period = max_t(u64, 10000, hwc->sample_period);
-		__hrtimer_start_range_ns(&hwc->hrtimer,
-				ns_to_ktime(period), 0,
-				HRTIMER_MODE_REL, 0);
-	}
+
+	perf_swevent_start_hrtimer(event);
 
 	return 0;
 }
 
 static void task_clock_perf_event_disable(struct perf_event *event)
 {
-	if (event->hw.sample_period)
-		hrtimer_cancel(&event->hw.hrtimer);
+	perf_swevent_cancel_hrtimer(event);
 	task_clock_perf_event_update(event, event->ctx->time);
 
 }
@@ -4086,6 +4171,7 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
+
 void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 		   int entry_size)
 {
@@ -4104,13 +4190,21 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 	if (!regs)
 		regs = task_pt_regs(current);
 
+	/* Trace events already protected against recursion */
 	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
 			 &data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
 
-extern int ftrace_profile_enable(int);
-extern void ftrace_profile_disable(int);
+static int perf_tp_event_match(struct perf_event *event,
+				struct perf_sample_data *data)
+{
+	void *record = data->raw->data;
+
+	if (likely(!event->filter) || filter_match_preds(event->filter, record))
+		return 1;
+	return 0;
+}
 
 static void tp_perf_event_destroy(struct perf_event *event)
 {
@@ -4135,11 +4229,99 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
 
 	return &perf_ops_generic;
 }
+
+static int perf_event_set_filter(struct perf_event *event, void __user *arg)
+{
+	char *filter_str;
+	int ret;
+
+	if (event->attr.type != PERF_TYPE_TRACEPOINT)
+		return -EINVAL;
+
+	filter_str = strndup_user(arg, PAGE_SIZE);
+	if (IS_ERR(filter_str))
+		return PTR_ERR(filter_str);
+
+	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
+
+	kfree(filter_str);
+	return ret;
+}
+
+static void perf_event_free_filter(struct perf_event *event)
+{
+	ftrace_profile_free_filter(event);
+}
+
 #else
+
+static int perf_tp_event_match(struct perf_event *event,
+				struct perf_sample_data *data)
+{
+	return 1;
+}
+
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
 {
 	return NULL;
 }
+
+static int perf_event_set_filter(struct perf_event *event, void __user *arg)
+{
+	return -ENOENT;
+}
+
+static void perf_event_free_filter(struct perf_event *event)
+{
+}
+
+#endif /* CONFIG_EVENT_PROFILE */
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+static void bp_perf_event_destroy(struct perf_event *event)
+{
+	release_bp_slot(event);
+}
+
+static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+{
+	int err;
+	/*
+	 * The breakpoint is already filled if we haven't created the counter
+	 * through perf syscall
+	 * FIXME: manage to get trigerred to NULL if it comes from syscalls
+	 */
+	if (!bp->callback)
+		err = register_perf_hw_breakpoint(bp);
+	else
+		err = __register_perf_hw_breakpoint(bp);
+	if (err)
+		return ERR_PTR(err);
+
+	bp->destroy = bp_perf_event_destroy;
+
+	return &perf_ops_bp;
+}
+
+void perf_bp_event(struct perf_event *bp, void *data)
+{
+	struct perf_sample_data sample;
+	struct pt_regs *regs = data;
+
+	sample.addr = bp->attr.bp_addr;
+
+	if (!perf_exclude_event(bp, regs))
+		perf_swevent_add(bp, 1, 1, &sample, regs);
+}
+#else
+static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+{
+	return NULL;
+}
+
+void perf_bp_event(struct perf_event *bp, void *regs)
+{
+}
 #endif
 
 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
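
Note: PERF_TYPE_BREAKPOINT routes hardware breakpoints through the perf framework; perf_bp_event() injects each hit as a software-style sample, honouring exclude_user/exclude_kernel via perf_exclude_event(). A hedged sketch of creating one from kernel code (field values are examples; watched_var and my_bp_callback are hypothetical, and the callback runs when the breakpoint triggers, per the hw_breakpoint API merged alongside this patch):

    	struct perf_event_attr attr = {
    		.type		= PERF_TYPE_BREAKPOINT,
    		.bp_addr	= (__u64)(unsigned long)&watched_var,
    		.bp_type	= HW_BREAKPOINT_W,	/* break on write */
    		.bp_len		= HW_BREAKPOINT_LEN_4,
    		.sample_period	= 1,			/* every hit */
    	};
    	struct perf_event *bp;

    	/* pinned to CPU 0 (pid == -1, cpu == 0) */
    	bp = perf_event_create_kernel_counter(&attr, 0, -1, my_bp_callback);
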
@@ -4186,6 +4368,8 @@ static const struct pmu *sw_perf_event_init(struct perf_event *event)
 	case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
 	case PERF_COUNT_SW_CONTEXT_SWITCHES:
 	case PERF_COUNT_SW_CPU_MIGRATIONS:
+	case PERF_COUNT_SW_ALIGNMENT_FAULTS:
+	case PERF_COUNT_SW_EMULATION_FAULTS:
 		if (!event->parent) {
 			atomic_inc(&perf_swevent_enabled[event_id]);
 			event->destroy = sw_perf_event_destroy;
@@ -4206,6 +4390,7 @@ perf_event_alloc(struct perf_event_attr *attr,
 		   struct perf_event_context *ctx,
 		   struct perf_event *group_leader,
 		   struct perf_event *parent_event,
+		   perf_callback_t callback,
 		   gfp_t gfpflags)
 {
 	const struct pmu *pmu;
@@ -4248,6 +4433,11 @@ perf_event_alloc(struct perf_event_attr *attr,
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
 
+	if (!callback && parent_event)
+		callback = parent_event->callback;
+
+	event->callback = callback;
+
 	if (attr->disabled)
 		event->state = PERF_EVENT_STATE_OFF;
 
@@ -4282,6 +4472,11 @@ perf_event_alloc(struct perf_event_attr *attr,
 		pmu = tp_perf_event_init(event);
 		break;
 
+	case PERF_TYPE_BREAKPOINT:
+		pmu = bp_perf_event_init(event);
+		break;
+
+
 	default:
 		break;
 	}
@@ -4394,7 +4589,7 @@ err_size:
 	goto out;
 }
 
-int perf_event_set_output(struct perf_event *event, int output_fd)
+static int perf_event_set_output(struct perf_event *event, int output_fd)
 {
 	struct perf_event *output_event = NULL;
 	struct file *output_file = NULL;
@@ -4524,7 +4719,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	}
 
 	event = perf_event_alloc(&attr, cpu, ctx, group_leader,
-				 NULL, GFP_KERNEL);
+				 NULL, NULL, GFP_KERNEL);
 	err = PTR_ERR(event);
 	if (IS_ERR(event))
 		goto err_put_context;
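
Note: for context, the userspace side of this syscall; perf_event_open() has no glibc wrapper, so callers go through syscall(2). Minimal example opening one hardware counter for the calling task (illustrative; error handling elided):

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
    			   int cpu, int group_fd, unsigned long flags)
    {
    	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int open_cycles_counter(void)
    {
    	struct perf_event_attr attr;

    	memset(&attr, 0, sizeof(attr));
    	attr.size = sizeof(attr);
    	attr.type = PERF_TYPE_HARDWARE;
    	attr.config = PERF_COUNT_HW_CPU_CYCLES;
    	attr.disabled = 1;	/* enable later with PERF_EVENT_IOC_ENABLE */

    	/* pid 0 == current task, cpu -1 == any CPU, no group */
    	return perf_event_open(&attr, 0, -1, -1, 0);
    }
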
@@ -4572,6 +4767,60 @@ err_put_context:
 	return err;
 }
 
+/**
+ * perf_event_create_kernel_counter
+ *
+ * @attr: attributes of the counter to create
+ * @cpu: cpu in which the counter is bound
+ * @pid: task to profile
+ */
+struct perf_event *
+perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+				 pid_t pid, perf_callback_t callback)
+{
+	struct perf_event *event;
+	struct perf_event_context *ctx;
+	int err;
+
+	/*
+	 * Get the target context (task or percpu):
+	 */
+
+	ctx = find_get_context(pid, cpu);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto err_exit;
+	}
+
+	event = perf_event_alloc(attr, cpu, ctx, NULL,
+				 NULL, callback, GFP_KERNEL);
+	if (IS_ERR(event)) {
+		err = PTR_ERR(event);
+		goto err_put_context;
+	}
+
+	event->filp = NULL;
+	WARN_ON_ONCE(ctx->parent_ctx);
+	mutex_lock(&ctx->mutex);
+	perf_install_in_context(ctx, event, cpu);
+	++ctx->generation;
+	mutex_unlock(&ctx->mutex);
+
+	event->owner = current;
+	get_task_struct(current);
+	mutex_lock(&current->perf_event_mutex);
+	list_add_tail(&event->owner_entry, &current->perf_event_list);
+	mutex_unlock(&current->perf_event_mutex);
+
+	return event;
+
+ err_put_context:
+	put_ctx(ctx);
+ err_exit:
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
+
 /*
  * inherit a event from parent task to child task:
  */
@@ -4597,7 +4846,7 @@ inherit_event(struct perf_event *parent_event,
 	child_event = perf_event_alloc(&parent_event->attr,
 					   parent_event->cpu, child_ctx,
 					   group_leader, parent_event,
-					   GFP_KERNEL);
+					   NULL, GFP_KERNEL);
 	if (IS_ERR(child_event))
 		return child_event;
 	get_ctx(child_ctx);
@@ -4615,6 +4864,8 @@ inherit_event(struct perf_event *parent_event,
 	if (parent_event->attr.freq)
 		child_event->hw.sample_period = parent_event->hw.sample_period;
 
+	child_event->overflow_handler = parent_event->overflow_handler;
+
 	/*
 	 * Link it up in the child's context:
 	 */
@@ -4704,7 +4955,6 @@ __perf_event_exit_task(struct perf_event *child_event,
 {
 	struct perf_event *parent_event;
 
-	update_event_times(child_event);
 	perf_event_remove_from_context(child_event);
 
 	parent_event = child_event->parent;
@@ -4756,6 +5006,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 * the events from it.
 	 */
 	unclone_ctx(child_ctx);
+	update_context_time(child_ctx);
 	spin_unlock_irqrestore(&child_ctx->lock, flags);
 
 	/*