Diffstat (limited to 'kernel/perf_event.c')
 kernel/perf_event.c | 528 ++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 344 insertions(+), 184 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 3256e36ad251..6b7ddba1dd64 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -29,6 +29,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -245,6 +246,49 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 	put_ctx(ctx);
 }
 
+static inline u64 perf_clock(void)
+{
+	return cpu_clock(smp_processor_id());
+}
+
+/*
+ * Update the record of the current time in a context.
+ */
+static void update_context_time(struct perf_event_context *ctx)
+{
+	u64 now = perf_clock();
+
+	ctx->time += now - ctx->timestamp;
+	ctx->timestamp = now;
+}
+
+/*
+ * Update the total_time_enabled and total_time_running fields for a event.
+ */
+static void update_event_times(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	u64 run_end;
+
+	if (event->state < PERF_EVENT_STATE_INACTIVE ||
+	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
+		return;
+
+	if (ctx->is_active)
+		run_end = ctx->time;
+	else
+		run_end = event->tstamp_stopped;
+
+	event->total_time_enabled = run_end - event->tstamp_enabled;
+
+	if (event->state == PERF_EVENT_STATE_INACTIVE)
+		run_end = event->tstamp_stopped;
+	else
+		run_end = ctx->time;
+
+	event->total_time_running = run_end - event->tstamp_running;
+}
+
 /*
  * Add a event from the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
@@ -293,6 +337,18 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 	if (event->group_leader != event)
 		event->group_leader->nr_siblings--;
 
+	update_event_times(event);
+
+	/*
+	 * If event was in error state, then keep it
+	 * that way, otherwise bogus counts will be
+	 * returned on read(). The only way to get out
+	 * of error state is by explicit re-enabling
+	 * of the event
+	 */
+	if (event->state > PERF_EVENT_STATE_OFF)
+		event->state = PERF_EVENT_STATE_OFF;
+
 	/*
 	 * If this was a group event with sibling events then
 	 * upgrade the siblings to singleton events by adding them
@@ -446,50 +502,11 @@ retry:
 	 * can remove the event safely, if the call above did not
 	 * succeed.
 	 */
-	if (!list_empty(&event->group_entry)) {
+	if (!list_empty(&event->group_entry))
 		list_del_event(event, ctx);
-	}
 	spin_unlock_irq(&ctx->lock);
 }
 
-static inline u64 perf_clock(void)
-{
-	return cpu_clock(smp_processor_id());
-}
-
-/*
- * Update the record of the current time in a context.
- */
-static void update_context_time(struct perf_event_context *ctx)
-{
-	u64 now = perf_clock();
-
-	ctx->time += now - ctx->timestamp;
-	ctx->timestamp = now;
-}
-
-/*
- * Update the total_time_enabled and total_time_running fields for a event.
- */
-static void update_event_times(struct perf_event *event)
-{
-	struct perf_event_context *ctx = event->ctx;
-	u64 run_end;
-
-	if (event->state < PERF_EVENT_STATE_INACTIVE ||
-	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
-		return;
-
-	event->total_time_enabled = ctx->time - event->tstamp_enabled;
-
-	if (event->state == PERF_EVENT_STATE_INACTIVE)
-		run_end = event->tstamp_stopped;
-	else
-		run_end = ctx->time;
-
-	event->total_time_running = run_end - event->tstamp_running;
-}
-
 /*
  * Update total_time_enabled and total_time_running for all events in a group.
  */
@@ -1032,10 +1049,10 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
 	update_context_time(ctx);
 
 	perf_disable();
-	if (ctx->nr_active)
+	if (ctx->nr_active) {
 		list_for_each_entry(event, &ctx->group_list, group_entry)
 			group_sched_out(event, cpuctx, ctx);
-
+	}
 	perf_enable();
  out:
 	spin_unlock(&ctx->lock);
@@ -1060,8 +1077,6 @@ static int context_equiv(struct perf_event_context *ctx1,
 		&& !ctx1->pin_count && !ctx2->pin_count;
 }
 
-static void __perf_event_read(void *event);
-
 static void __perf_event_sync_stat(struct perf_event *event,
 				     struct perf_event *next_event)
 {
@@ -1079,8 +1094,8 @@ static void __perf_event_sync_stat(struct perf_event *event,
 	 */
 	switch (event->state) {
 	case PERF_EVENT_STATE_ACTIVE:
-		__perf_event_read(event);
-		break;
+		event->pmu->read(event);
+		/* fall-through */
 
 	case PERF_EVENT_STATE_INACTIVE:
 		update_event_times(event);
@@ -1119,6 +1134,8 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
 	if (!ctx->nr_stat)
 		return;
 
+	update_context_time(ctx);
+
 	event = list_first_entry(&ctx->event_list,
 				   struct perf_event, event_entry);
 
@@ -1162,8 +1179,6 @@ void perf_event_task_sched_out(struct task_struct *task,
 	if (likely(!ctx || !cpuctx->task_ctx))
 		return;
 
-	update_context_time(ctx);
-
 	rcu_read_lock();
 	parent = rcu_dereference(ctx->parent_ctx);
 	next_ctx = next->perf_event_ctxp;
@@ -1516,7 +1531,6 @@ static void __perf_event_read(void *info)
 	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_event *event = info;
 	struct perf_event_context *ctx = event->ctx;
-	unsigned long flags;
 
 	/*
 	 * If this is a task context, we need to check whether it is
@@ -1528,12 +1542,12 @@ static void __perf_event_read(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	local_irq_save(flags);
-	if (ctx->is_active)
-		update_context_time(ctx);
-	event->pmu->read(event);
+	spin_lock(&ctx->lock);
+	update_context_time(ctx);
 	update_event_times(event);
-	local_irq_restore(flags);
+	spin_unlock(&ctx->lock);
+
+	event->pmu->read(event);
 }
 
 static u64 perf_event_read(struct perf_event *event)
@@ -1546,7 +1560,13 @@ static u64 perf_event_read(struct perf_event *event)
 		smp_call_function_single(event->oncpu,
 					 __perf_event_read, event, 1);
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
+		struct perf_event_context *ctx = event->ctx;
+		unsigned long flags;
+
+		spin_lock_irqsave(&ctx->lock, flags);
+		update_context_time(ctx);
 		update_event_times(event);
+		spin_unlock_irqrestore(&ctx->lock, flags);
 	}
 
 	return atomic64_read(&event->count);
@@ -1700,16 +1720,10 @@ static void free_event(struct perf_event *event)
 	call_rcu(&event->rcu_head, free_event_rcu);
 }
 
-/*
- * Called when the last reference to the file is gone.
- */
-static int perf_release(struct inode *inode, struct file *file)
+int perf_event_release_kernel(struct perf_event *event)
 {
-	struct perf_event *event = file->private_data;
 	struct perf_event_context *ctx = event->ctx;
 
-	file->private_data = NULL;
-
 	WARN_ON_ONCE(ctx->parent_ctx);
 	mutex_lock(&ctx->mutex);
 	perf_event_remove_from_context(event);
@@ -1724,6 +1738,19 @@ static int perf_release(struct inode *inode, struct file *file)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(perf_event_release_kernel);
+
+/*
+ * Called when the last reference to the file is gone.
+ */
+static int perf_release(struct inode *inode, struct file *file)
+{
+	struct perf_event *event = file->private_data;
+
+	file->private_data = NULL;
+
+	return perf_event_release_kernel(event);
+}
 
 static int perf_event_read_size(struct perf_event *event)
 {
@@ -1750,91 +1777,94 @@ static int perf_event_read_size(struct perf_event *event)
 	return size;
 }
 
-static u64 perf_event_read_value(struct perf_event *event)
+u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 {
 	struct perf_event *child;
 	u64 total = 0;
 
+	*enabled = 0;
+	*running = 0;
+
+	mutex_lock(&event->child_mutex);
 	total += perf_event_read(event);
-	list_for_each_entry(child, &event->child_list, child_list)
+	*enabled += event->total_time_enabled +
+			atomic64_read(&event->child_total_time_enabled);
+	*running += event->total_time_running +
+			atomic64_read(&event->child_total_time_running);
+
+	list_for_each_entry(child, &event->child_list, child_list) {
 		total += perf_event_read(child);
+		*enabled += child->total_time_enabled;
+		*running += child->total_time_running;
+	}
+	mutex_unlock(&event->child_mutex);
 
 	return total;
 }
-
-static int perf_event_read_entry(struct perf_event *event,
-				   u64 read_format, char __user *buf)
-{
-	int n = 0, count = 0;
-	u64 values[2];
-
-	values[n++] = perf_event_read_value(event);
-	if (read_format & PERF_FORMAT_ID)
-		values[n++] = primary_event_id(event);
-
-	count = n * sizeof(u64);
-
-	if (copy_to_user(buf, values, count))
-		return -EFAULT;
-
-	return count;
-}
+EXPORT_SYMBOL_GPL(perf_event_read_value);
 
 static int perf_event_read_group(struct perf_event *event,
 				   u64 read_format, char __user *buf)
 {
 	struct perf_event *leader = event->group_leader, *sub;
-	int n = 0, size = 0, err = -EFAULT;
-	u64 values[3];
+	int n = 0, size = 0, ret = -EFAULT;
+	struct perf_event_context *ctx = leader->ctx;
+	u64 values[5];
+	u64 count, enabled, running;
+
+	mutex_lock(&ctx->mutex);
+	count = perf_event_read_value(leader, &enabled, &running);
 
 	values[n++] = 1 + leader->nr_siblings;
-	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		values[n++] = leader->total_time_enabled +
-			atomic64_read(&leader->child_total_time_enabled);
-	}
-	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		values[n++] = leader->total_time_running +
-			atomic64_read(&leader->child_total_time_running);
-	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		values[n++] = enabled;
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		values[n++] = running;
+	values[n++] = count;
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_event_id(leader);
 
 	size = n * sizeof(u64);
 
 	if (copy_to_user(buf, values, size))
-		return -EFAULT;
-
-	err = perf_event_read_entry(leader, read_format, buf + size);
-	if (err < 0)
-		return err;
+		goto unlock;
 
-	size += err;
+	ret = size;
 
 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
-		err = perf_event_read_entry(sub, read_format,
-				buf + size);
-		if (err < 0)
-			return err;
+		n = 0;
+
+		values[n++] = perf_event_read_value(sub, &enabled, &running);
+		if (read_format & PERF_FORMAT_ID)
+			values[n++] = primary_event_id(sub);
+
+		size = n * sizeof(u64);
+
+		if (copy_to_user(buf + ret, values, size)) {
+			ret = -EFAULT;
+			goto unlock;
+		}
 
-		size += err;
+		ret += size;
 	}
+unlock:
+	mutex_unlock(&ctx->mutex);
 
-	return size;
+	return ret;
 }
 
 static int perf_event_read_one(struct perf_event *event,
 				 u64 read_format, char __user *buf)
 {
+	u64 enabled, running;
 	u64 values[4];
 	int n = 0;
 
-	values[n++] = perf_event_read_value(event);
-	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		values[n++] = event->total_time_enabled +
-			atomic64_read(&event->child_total_time_enabled);
-	}
-	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		values[n++] = event->total_time_running +
-			atomic64_read(&event->child_total_time_running);
-	}
+	values[n++] = perf_event_read_value(event, &enabled, &running);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		values[n++] = enabled;
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		values[n++] = running;
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(event);
 
@@ -1865,12 +1895,10 @@ perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
 		return -ENOSPC;
 
 	WARN_ON_ONCE(event->ctx->parent_ctx);
-	mutex_lock(&event->child_mutex);
 	if (read_format & PERF_FORMAT_GROUP)
 		ret = perf_event_read_group(event, read_format, buf);
 	else
 		ret = perf_event_read_one(event, read_format, buf);
-	mutex_unlock(&event->child_mutex);
 
 	return ret;
 }
@@ -2182,6 +2210,7 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
 	perf_mmap_free_page((unsigned long)data->user_page);
 	for (i = 0; i < data->nr_pages; i++)
 		perf_mmap_free_page((unsigned long)data->data_pages[i]);
+	kfree(data);
 }
 
 #else
@@ -2222,6 +2251,7 @@ static void perf_mmap_data_free_work(struct work_struct *work)
 		perf_mmap_unmark_page(base + (i * PAGE_SIZE));
 
 	vfree(base);
+	kfree(data);
 }
 
 static void perf_mmap_data_free(struct perf_mmap_data *data)
@@ -2315,7 +2345,7 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data)
 	}
 
 	if (!data->watermark)
-		data->watermark = max_t(long, PAGE_SIZE, max_size / 2);
+		data->watermark = max_size / 2;
 
 
 	rcu_assign_pointer(event->data, data);
@@ -2327,7 +2357,6 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head)
 
 	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
 	perf_mmap_data_free(data);
-	kfree(data);
 }
 
 static void perf_mmap_data_release(struct perf_event *event)
@@ -3245,15 +3274,10 @@ static void perf_event_task_ctx(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
-		return;
-
-	rcu_read_lock();
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (perf_event_task_match(event))
 			perf_event_task_output(event, task_event);
 	}
-	rcu_read_unlock();
 }
 
 static void perf_event_task_event(struct perf_task_event *task_event)
@@ -3261,11 +3285,11 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx = task_event->task_ctx;
 
+	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_task_ctx(&cpuctx->ctx, task_event);
 	put_cpu_var(perf_cpu_context);
 
-	rcu_read_lock();
 	if (!ctx)
 		ctx = rcu_dereference(task_event->task->perf_event_ctxp);
 	if (ctx)
@@ -3357,15 +3381,10 @@ static void perf_event_comm_ctx(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
-		return;
-
-	rcu_read_lock();
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (perf_event_comm_match(event))
 			perf_event_comm_output(event, comm_event);
 	}
-	rcu_read_unlock();
 }
 
 static void perf_event_comm_event(struct perf_comm_event *comm_event)
@@ -3376,7 +3395,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	char comm[TASK_COMM_LEN];
 
 	memset(comm, 0, sizeof(comm));
-	strncpy(comm, comm_event->task->comm, sizeof(comm));
+	strlcpy(comm, comm_event->task->comm, sizeof(comm));
 	size = ALIGN(strlen(comm)+1, sizeof(u64));
 
 	comm_event->comm = comm;
@@ -3384,11 +3403,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 
 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
 
+	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 	put_cpu_var(perf_cpu_context);
 
-	rcu_read_lock();
 	/*
 	 * doesn't really matter which of the child contexts the
 	 * events ends up in.
@@ -3481,15 +3500,10 @@ static void perf_event_mmap_ctx(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
-		return;
-
-	rcu_read_lock();
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (perf_event_mmap_match(event, mmap_event))
 			perf_event_mmap_output(event, mmap_event);
 	}
-	rcu_read_unlock();
 }
 
 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
@@ -3545,11 +3559,11 @@ got_name:
 
 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
 
+	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
 	put_cpu_var(perf_cpu_context);
 
-	rcu_read_lock();
 	/*
 	 * doesn't really matter which of the child contexts the
 	 * events ends up in.
@@ -3688,7 +3702,11 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
 		perf_event_disable(event);
 	}
 
-	perf_event_output(event, nmi, data, regs);
+	if (event->overflow_handler)
+		event->overflow_handler(event, nmi, data, regs);
+	else
+		perf_event_output(event, nmi, data, regs);
+
 	return ret;
 }
 
@@ -3733,16 +3751,16 @@ again:
 	return nr;
 }
 
-static void perf_swevent_overflow(struct perf_event *event,
+static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
 				    int nmi, struct perf_sample_data *data,
 				    struct pt_regs *regs)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	int throttle = 0;
-	u64 overflow;
 
 	data->period = event->hw.last_period;
-	overflow = perf_swevent_set_period(event);
+	if (!overflow)
+		overflow = perf_swevent_set_period(event);
 
 	if (hwc->interrupts == MAX_INTERRUPTS)
 		return;
@@ -3775,14 +3793,19 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
 
 	atomic64_add(nr, &event->count);
 
+	if (!regs)
+		return;
+
 	if (!hwc->sample_period)
 		return;
 
-	if (!regs)
+	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
+		return perf_swevent_overflow(event, 1, nmi, data, regs);
+
+	if (atomic64_add_negative(nr, &hwc->period_left))
 		return;
 
-	if (!atomic64_add_negative(nr, &hwc->period_left))
-		perf_swevent_overflow(event, nmi, data, regs);
+	perf_swevent_overflow(event, 0, nmi, data, regs);
 }
 
 static int perf_swevent_is_counting(struct perf_event *event)
@@ -3818,6 +3841,20 @@ static int perf_swevent_is_counting(struct perf_event *event)
 static int perf_tp_event_match(struct perf_event *event,
 				struct perf_sample_data *data);
 
+static int perf_exclude_event(struct perf_event *event,
+			      struct pt_regs *regs)
+{
+	if (regs) {
+		if (event->attr.exclude_user && user_mode(regs))
+			return 1;
+
+		if (event->attr.exclude_kernel && !user_mode(regs))
+			return 1;
+	}
+
+	return 0;
+}
+
 static int perf_swevent_match(struct perf_event *event,
 				enum perf_type_id type,
 				u32 event_id,
@@ -3829,16 +3866,12 @@ static int perf_swevent_match(struct perf_event *event,
 
 	if (event->attr.type != type)
 		return 0;
+
 	if (event->attr.config != event_id)
 		return 0;
 
-	if (regs) {
-		if (event->attr.exclude_user && user_mode(regs))
-			return 0;
-
-		if (event->attr.exclude_kernel && !user_mode(regs))
-			return 0;
-	}
+	if (perf_exclude_event(event, regs))
+		return 0;
 
 	if (event->attr.type == PERF_TYPE_TRACEPOINT &&
 	    !perf_tp_event_match(event, data))
@@ -3855,49 +3888,59 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
 {
 	struct perf_event *event;
 
-	if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
-		return;
-
-	rcu_read_lock();
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (perf_swevent_match(event, type, event_id, data, regs))
 			perf_swevent_add(event, nr, nmi, data, regs);
 	}
-	rcu_read_unlock();
 }
 
-static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
+int perf_swevent_get_recursion_context(void)
 {
+	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
+	int rctx;
+
 	if (in_nmi())
-		return &cpuctx->recursion[3];
+		rctx = 3;
+	else if (in_irq())
+		rctx = 2;
+	else if (in_softirq())
+		rctx = 1;
+	else
+		rctx = 0;
 
-	if (in_irq())
-		return &cpuctx->recursion[2];
+	if (cpuctx->recursion[rctx]) {
+		put_cpu_var(perf_cpu_context);
+		return -1;
+	}
 
-	if (in_softirq())
-		return &cpuctx->recursion[1];
+	cpuctx->recursion[rctx]++;
+	barrier();
 
-	return &cpuctx->recursion[0];
+	return rctx;
 }
+EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
+
+void perf_swevent_put_recursion_context(int rctx)
+{
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	barrier();
+	cpuctx->recursion[rctx]--;
+	put_cpu_var(perf_cpu_context);
+}
+EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
 
 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 				u64 nr, int nmi,
 				struct perf_sample_data *data,
 				struct pt_regs *regs)
 {
-	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
-	int *recursion = perf_swevent_recursion_context(cpuctx);
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
 
-	if (*recursion)
-		goto out;
-
-	(*recursion)++;
-	barrier();
-
+	cpuctx = &__get_cpu_var(perf_cpu_context);
+	rcu_read_lock();
 	perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
 				 nr, nmi, data, regs);
-	rcu_read_lock();
 	/*
 	 * doesn't really matter which of the child contexts the
 	 * events ends up in.
@@ -3906,23 +3949,24 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 	if (ctx)
 		perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
 	rcu_read_unlock();
-
-	barrier();
-	(*recursion)--;
-
-out:
-	put_cpu_var(perf_cpu_context);
 }
 
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
 			    struct pt_regs *regs, u64 addr)
 {
-	struct perf_sample_data data = {
-		.addr = addr,
-	};
+	struct perf_sample_data data;
+	int rctx;
 
-	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi,
-				&data, regs);
+	rctx = perf_swevent_get_recursion_context();
+	if (rctx < 0)
+		return;
+
+	data.addr = addr;
+	data.raw = NULL;
+
+	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
+
+	perf_swevent_put_recursion_context(rctx);
 }
 
 static void perf_swevent_read(struct perf_event *event)
@@ -3967,6 +4011,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 	event->pmu->read(event);
 
 	data.addr = 0;
+	data.period = event->hw.last_period;
 	regs = get_irq_regs();
 	/*
 	 * In case we exclude kernel IPs or are somehow not in interrupt
@@ -4145,6 +4190,7 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 	if (!regs)
 		regs = task_pt_regs(current);
 
+	/* Trace events already protected against recursion */
 	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
 				&data, regs);
 }
@@ -4231,6 +4277,53 @@ static void perf_event_free_filter(struct perf_event *event)
 
 #endif /* CONFIG_EVENT_PROFILE */
 
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+static void bp_perf_event_destroy(struct perf_event *event)
+{
+	release_bp_slot(event);
+}
+
+static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+{
+	int err;
+	/*
+	 * The breakpoint is already filled if we haven't created the counter
+	 * through perf syscall
+	 * FIXME: manage to get trigerred to NULL if it comes from syscalls
+	 */
+	if (!bp->callback)
+		err = register_perf_hw_breakpoint(bp);
+	else
+		err = __register_perf_hw_breakpoint(bp);
+	if (err)
+		return ERR_PTR(err);
+
+	bp->destroy = bp_perf_event_destroy;
+
+	return &perf_ops_bp;
+}
+
+void perf_bp_event(struct perf_event *bp, void *data)
+{
+	struct perf_sample_data sample;
+	struct pt_regs *regs = data;
+
+	sample.addr = bp->attr.bp_addr;
+
+	if (!perf_exclude_event(bp, regs))
+		perf_swevent_add(bp, 1, 1, &sample, regs);
+}
+#else
+static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+{
+	return NULL;
+}
+
+void perf_bp_event(struct perf_event *bp, void *regs)
+{
+}
+#endif
+
 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 static void sw_perf_event_destroy(struct perf_event *event)
@@ -4297,6 +4390,7 @@ perf_event_alloc(struct perf_event_attr *attr,
 		   struct perf_event_context *ctx,
 		   struct perf_event *group_leader,
 		   struct perf_event *parent_event,
+		   perf_callback_t callback,
 		   gfp_t gfpflags)
 {
 	const struct pmu *pmu;
@@ -4339,6 +4433,11 @@ perf_event_alloc(struct perf_event_attr *attr,
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
 
+	if (!callback && parent_event)
+		callback = parent_event->callback;
+
+	event->callback = callback;
+
 	if (attr->disabled)
 		event->state = PERF_EVENT_STATE_OFF;
 
@@ -4373,6 +4472,11 @@ perf_event_alloc(struct perf_event_attr *attr,
 		pmu = tp_perf_event_init(event);
 		break;
 
+	case PERF_TYPE_BREAKPOINT:
+		pmu = bp_perf_event_init(event);
+		break;
+
+
 	default:
 		break;
 	}
@@ -4615,7 +4719,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	}
 
 	event = perf_event_alloc(&attr, cpu, ctx, group_leader,
-				     NULL, GFP_KERNEL);
+				     NULL, NULL, GFP_KERNEL);
 	err = PTR_ERR(event);
 	if (IS_ERR(event))
 		goto err_put_context;
@@ -4663,6 +4767,60 @@ err_put_context:
 	return err;
 }
 
+/**
+ * perf_event_create_kernel_counter
+ *
+ * @attr: attributes of the counter to create
+ * @cpu: cpu in which the counter is bound
+ * @pid: task to profile
+ */
+struct perf_event *
+perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+				 pid_t pid, perf_callback_t callback)
+{
+	struct perf_event *event;
+	struct perf_event_context *ctx;
+	int err;
+
+	/*
+	 * Get the target context (task or percpu):
+	 */
+
+	ctx = find_get_context(pid, cpu);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto err_exit;
+	}
+
+	event = perf_event_alloc(attr, cpu, ctx, NULL,
+				     NULL, callback, GFP_KERNEL);
+	if (IS_ERR(event)) {
+		err = PTR_ERR(event);
+		goto err_put_context;
+	}
+
+	event->filp = NULL;
+	WARN_ON_ONCE(ctx->parent_ctx);
+	mutex_lock(&ctx->mutex);
+	perf_install_in_context(ctx, event, cpu);
+	++ctx->generation;
+	mutex_unlock(&ctx->mutex);
+
+	event->owner = current;
+	get_task_struct(current);
+	mutex_lock(&current->perf_event_mutex);
+	list_add_tail(&event->owner_entry, &current->perf_event_list);
+	mutex_unlock(&current->perf_event_mutex);
+
+	return event;
+
+ err_put_context:
+	put_ctx(ctx);
+ err_exit:
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
+
 /*
  * inherit a event from parent task to child task:
  */
@@ -4688,7 +4846,7 @@ inherit_event(struct perf_event *parent_event,
 	child_event = perf_event_alloc(&parent_event->attr,
 					   parent_event->cpu, child_ctx,
 					   group_leader, parent_event,
-					   GFP_KERNEL);
+					   NULL, GFP_KERNEL);
 	if (IS_ERR(child_event))
 		return child_event;
 	get_ctx(child_ctx);
@@ -4706,6 +4864,8 @@ inherit_event(struct perf_event *parent_event,
 	if (parent_event->attr.freq)
 		child_event->hw.sample_period = parent_event->hw.sample_period;
 
+	child_event->overflow_handler = parent_event->overflow_handler;
+
 	/*
 	 * Link it up in the child's context:
 	 */
@@ -4795,7 +4955,6 @@ __perf_event_exit_task(struct perf_event *child_event,
 {
 	struct perf_event *parent_event;
 
-	update_event_times(child_event);
 	perf_event_remove_from_context(child_event);
 
 	parent_event = child_event->parent;
@@ -4847,6 +5006,7 @@ void perf_event_exit_task(struct task_struct *child)
 	 * the events from it.
 	 */
 	unclone_ctx(child_ctx);
+	update_context_time(child_ctx);
 	spin_unlock_irqrestore(&child_ctx->lock, flags);
 
 	/*
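
Usage note (illustrative sketch, not part of the patch): the hunks above export an in-kernel counter API, perf_event_create_kernel_counter(), perf_event_read_value() and perf_event_release_kernel(), for kernel-side users such as the hw_breakpoint code. A minimal module-side sketch is shown below; the attr values, the cpu = 0 / pid = -1 binding and the example_* names are assumptions made for the example, not taken from this diff.

	/* Illustrative only: count CPU cycles on CPU 0 with the new API. */
	#include <linux/perf_event.h>
	#include <linux/err.h>

	static struct perf_event *cycles_event;

	static int example_start(void)
	{
		struct perf_event_attr attr = {
			.type	= PERF_TYPE_HARDWARE,
			.config	= PERF_COUNT_HW_CPU_CYCLES,
			.size	= sizeof(attr),
		};

		/* cpu = 0, pid = -1: a per-cpu counter, no overflow callback */
		cycles_event = perf_event_create_kernel_counter(&attr, 0, -1, NULL);
		if (IS_ERR(cycles_event))
			return PTR_ERR(cycles_event);

		return 0;
	}

	static void example_stop(void)
	{
		u64 count, enabled, running;

		/* sums the event and its children, as implemented above */
		count = perf_event_read_value(cycles_event, &enabled, &running);
		pr_info("cycles: %llu (enabled %llu ns, running %llu ns)\n",
			(unsigned long long)count,
			(unsigned long long)enabled,
			(unsigned long long)running);

		perf_event_release_kernel(cycles_event);
	}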