-rw-r--r--  kernel/events/core.c                      | 214
-rw-r--r--  tools/perf/Documentation/perf-script.txt  |  12
-rw-r--r--  tools/perf/builtin-script.c               | 108
-rw-r--r--  tools/perf/builtin-stat.c                 |   9
-rw-r--r--  tools/perf/util/evsel.c                   |   1
-rw-r--r--  tools/perf/util/evsel.h                   |   1
-rw-r--r--  tools/perf/util/python.c                  |  17
-rw-r--r--  tools/perf/util/session.c                 |  61
-rw-r--r--  tools/perf/util/session.h                 |   5
9 files changed, 263 insertions(+), 165 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9efe7108ccaf..ba89f40abe6a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -200,6 +200,22 @@ __get_cpu_context(struct perf_event_context *ctx)
 	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
 }
 
+static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
+			  struct perf_event_context *ctx)
+{
+	raw_spin_lock(&cpuctx->ctx.lock);
+	if (ctx)
+		raw_spin_lock(&ctx->lock);
+}
+
+static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
+			    struct perf_event_context *ctx)
+{
+	if (ctx)
+		raw_spin_unlock(&ctx->lock);
+	raw_spin_unlock(&cpuctx->ctx.lock);
+}
+
 #ifdef CONFIG_CGROUP_PERF
 
 /*
@@ -340,11 +356,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 	rcu_read_lock();
 
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
-
 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 
-		perf_pmu_disable(cpuctx->ctx.pmu);
-
 		/*
 		 * perf_cgroup_events says at least one
 		 * context on this CPU has cgroup events.
@@ -353,6 +366,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 		 * events for a context.
 		 */
 		if (cpuctx->ctx.nr_cgroups > 0) {
+			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+			perf_pmu_disable(cpuctx->ctx.pmu);
 
 			if (mode & PERF_CGROUP_SWOUT) {
 				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
@@ -372,9 +387,9 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 				cpuctx->cgrp = perf_cgroup_from_task(task);
 				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
 			}
+			perf_pmu_enable(cpuctx->ctx.pmu);
+			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 		}
-
-		perf_pmu_enable(cpuctx->ctx.pmu);
 	}
 
 	rcu_read_unlock();
@@ -1105,6 +1120,10 @@ static int __perf_remove_from_context(void *info)
 	raw_spin_lock(&ctx->lock);
 	event_sched_out(event, cpuctx, ctx);
 	list_del_event(event, ctx);
+	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
+		ctx->is_active = 0;
+		cpuctx->task_ctx = NULL;
+	}
 	raw_spin_unlock(&ctx->lock);
 
 	return 0;
@@ -1454,8 +1473,24 @@ static void add_event_to_ctx(struct perf_event *event,
 	event->tstamp_stopped = tstamp;
 }
 
-static void perf_event_context_sched_in(struct perf_event_context *ctx,
-					struct task_struct *tsk);
+static void task_ctx_sched_out(struct perf_event_context *ctx);
+static void
+ctx_sched_in(struct perf_event_context *ctx,
+	     struct perf_cpu_context *cpuctx,
+	     enum event_type_t event_type,
+	     struct task_struct *task);
+
+static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
+				struct perf_event_context *ctx,
+				struct task_struct *task)
+{
+	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+	if (ctx)
+		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+	if (ctx)
+		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+}
 
 /*
  * Cross CPU call to install and enable a performance event
@@ -1466,20 +1501,31 @@ static int __perf_install_in_context(void *info)
 {
 	struct perf_event *event = info;
 	struct perf_event_context *ctx = event->ctx;
-	struct perf_event *leader = event->group_leader;
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
-	int err;
+	struct perf_event_context *task_ctx = cpuctx->task_ctx;
+	struct task_struct *task = current;
+
+	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+	perf_pmu_disable(cpuctx->ctx.pmu);
 
 	/*
-	 * In case we're installing a new context to an already running task,
-	 * could also happen before perf_event_task_sched_in() on architectures
-	 * which do context switches with IRQs enabled.
+	 * If there was an active task_ctx schedule it out.
 	 */
-	if (ctx->task && !cpuctx->task_ctx)
-		perf_event_context_sched_in(ctx, ctx->task);
+	if (task_ctx) {
+		task_ctx_sched_out(task_ctx);
+		/*
+		 * If the context we're installing events in is not the
+		 * active task_ctx, flip them.
+		 */
+		if (ctx->task && task_ctx != ctx) {
+			raw_spin_unlock(&cpuctx->ctx.lock);
+			raw_spin_lock(&ctx->lock);
+			cpuctx->task_ctx = task_ctx = ctx;
+		}
+		task = task_ctx->task;
+	}
+	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
 
-	raw_spin_lock(&ctx->lock);
-	ctx->is_active = 1;
 	update_context_time(ctx);
 	/*
 	 * update cgrp time only if current cgrp
@@ -1490,43 +1536,13 @@ static int __perf_install_in_context(void *info)
 
 	add_event_to_ctx(event, ctx);
 
-	if (!event_filter_match(event))
-		goto unlock;
-
-	/*
-	 * Don't put the event on if it is disabled or if
-	 * it is in a group and the group isn't on.
-	 */
-	if (event->state != PERF_EVENT_STATE_INACTIVE ||
-	    (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
-		goto unlock;
-
 	/*
-	 * An exclusive event can't go on if there are already active
-	 * hardware events, and no hardware event can go on if there
-	 * is already an exclusive event on.
+	 * Schedule everything back in
 	 */
-	if (!group_can_go_on(event, cpuctx, 1))
-		err = -EEXIST;
-	else
-		err = event_sched_in(event, cpuctx, ctx);
+	perf_event_sched_in(cpuctx, task_ctx, task);
 
-	if (err) {
-		/*
-		 * This event couldn't go on. If it is in a group
-		 * then we have to pull the whole group off.
-		 * If the event group is pinned then put it in error state.
-		 */
-		if (leader != event)
-			group_sched_out(leader, cpuctx, ctx);
-		if (leader->attr.pinned) {
-			update_group_times(leader);
-			leader->state = PERF_EVENT_STATE_ERROR;
-		}
-	}
-
-unlock:
-	raw_spin_unlock(&ctx->lock);
+	perf_pmu_enable(cpuctx->ctx.pmu);
+	perf_ctx_unlock(cpuctx, task_ctx);
 
 	return 0;
 }
@@ -1758,30 +1774,28 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 			  enum event_type_t event_type)
 {
 	struct perf_event *event;
+	int is_active = ctx->is_active;
 
-	raw_spin_lock(&ctx->lock);
-	perf_pmu_disable(ctx->pmu);
-	ctx->is_active = 0;
+	ctx->is_active &= ~event_type;
 	if (likely(!ctx->nr_events))
-		goto out;
+		return;
+
 	update_context_time(ctx);
 	update_cgrp_time_from_cpuctx(cpuctx);
-
 	if (!ctx->nr_active)
-		goto out;
+		return;
 
-	if (event_type & EVENT_PINNED) {
+	perf_pmu_disable(ctx->pmu);
+	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 	}
 
-	if (event_type & EVENT_FLEXIBLE) {
+	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 	}
-out:
 	perf_pmu_enable(ctx->pmu);
-	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -1929,8 +1943,10 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
 	rcu_read_unlock();
 
 	if (do_switch) {
+		raw_spin_lock(&ctx->lock);
 		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
 		cpuctx->task_ctx = NULL;
+		raw_spin_unlock(&ctx->lock);
 	}
 }
 
@@ -1965,8 +1981,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 		perf_cgroup_sched_out(task);
 }
 
-static void task_ctx_sched_out(struct perf_event_context *ctx,
-			       enum event_type_t event_type)
+static void task_ctx_sched_out(struct perf_event_context *ctx)
 {
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
@@ -1976,7 +1991,7 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
 		return;
 
-	ctx_sched_out(ctx, cpuctx, event_type);
+	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
 	cpuctx->task_ctx = NULL;
 }
 
@@ -2055,11 +2070,11 @@ ctx_sched_in(struct perf_event_context *ctx,
 	     struct task_struct *task)
 {
 	u64 now;
+	int is_active = ctx->is_active;
 
-	raw_spin_lock(&ctx->lock);
-	ctx->is_active = 1;
+	ctx->is_active |= event_type;
 	if (likely(!ctx->nr_events))
-		goto out;
+		return;
 
 	now = perf_clock();
 	ctx->timestamp = now;
@@ -2068,15 +2083,12 @@ ctx_sched_in(struct perf_event_context *ctx,
 	 * First go through the list and put on any pinned groups
 	 * in order to give them the best chance of going on.
 	 */
-	if (event_type & EVENT_PINNED)
+	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
 		ctx_pinned_sched_in(ctx, cpuctx);
 
 	/* Then walk through the lower prio flexible groups */
-	if (event_type & EVENT_FLEXIBLE)
+	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
 		ctx_flexible_sched_in(ctx, cpuctx);
-
-out:
-	raw_spin_unlock(&ctx->lock);
 }
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
@@ -2088,19 +2100,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 	ctx_sched_in(ctx, cpuctx, event_type, task);
 }
 
-static void task_ctx_sched_in(struct perf_event_context *ctx,
-			      enum event_type_t event_type)
-{
-	struct perf_cpu_context *cpuctx;
-
-	cpuctx = __get_cpu_context(ctx);
-	if (cpuctx->task_ctx == ctx)
-		return;
-
-	ctx_sched_in(ctx, cpuctx, event_type, NULL);
-	cpuctx->task_ctx = ctx;
-}
-
 static void perf_event_context_sched_in(struct perf_event_context *ctx,
 					struct task_struct *task)
 {
@@ -2110,6 +2109,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	if (cpuctx->task_ctx == ctx)
 		return;
 
+	perf_ctx_lock(cpuctx, ctx);
 	perf_pmu_disable(ctx->pmu);
 	/*
 	 * We want to keep the following priority order:
@@ -2118,18 +2118,18 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	 */
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-	ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
-	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
-	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+	perf_event_sched_in(cpuctx, ctx, task);
 
 	cpuctx->task_ctx = ctx;
 
+	perf_pmu_enable(ctx->pmu);
+	perf_ctx_unlock(cpuctx, ctx);
+
 	/*
 	 * Since these rotations are per-cpu, we need to ensure the
 	 * cpu-context we got scheduled on is actually rotating.
 	 */
 	perf_pmu_rotate_start(ctx->pmu);
-	perf_pmu_enable(ctx->pmu);
 }
 
 /*
@@ -2269,7 +2269,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 	u64 interrupts, now;
 	s64 delta;
 
-	raw_spin_lock(&ctx->lock);
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
@@ -2301,7 +2300,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 		if (delta > 0)
 			perf_adjust_period(event, period, delta);
 	}
-	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -2309,16 +2307,12 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-	raw_spin_lock(&ctx->lock);
-
 	/*
 	 * Rotate the first entry last of non-pinned groups. Rotation might be
 	 * disabled by the inheritance code.
 	 */
 	if (!ctx->rotate_disable)
 		list_rotate_left(&ctx->flexible_groups);
-
-	raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -2345,6 +2339,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 		rotate = 1;
 	}
 
+	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 	perf_pmu_disable(cpuctx->ctx.pmu);
 	perf_ctx_adjust_freq(&cpuctx->ctx, interval);
 	if (ctx)
@@ -2355,21 +2350,20 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
+		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
 	rotate_ctx(&cpuctx->ctx);
 	if (ctx)
 		rotate_ctx(ctx);
 
-	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
-	if (ctx)
-		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+	perf_event_sched_in(cpuctx, ctx, current);
 
 done:
 	if (remove)
 		list_del_init(&cpuctx->rotation_list);
 
 	perf_pmu_enable(cpuctx->ctx.pmu);
+	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 }
 
 void perf_event_task_tick(void)
@@ -2424,9 +2418,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	 * in.
 	 */
 	perf_cgroup_sched_out(current);
-	task_ctx_sched_out(ctx, EVENT_ALL);
 
 	raw_spin_lock(&ctx->lock);
+	task_ctx_sched_out(ctx);
 
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		ret = event_enable_on_exec(event, ctx);
@@ -2835,16 +2829,12 @@ retry:
 		unclone_ctx(ctx);
 		++ctx->pin_count;
 		raw_spin_unlock_irqrestore(&ctx->lock, flags);
-	}
-
-	if (!ctx) {
+	} else {
 		ctx = alloc_perf_context(pmu, task);
 		err = -ENOMEM;
 		if (!ctx)
 			goto errout;
 
-		get_ctx(ctx);
-
 		err = 0;
 		mutex_lock(&task->perf_event_mutex);
 		/*
@@ -2856,14 +2846,14 @@ retry:
 		else if (task->perf_event_ctxp[ctxn])
 			err = -EAGAIN;
 		else {
+			get_ctx(ctx);
 			++ctx->pin_count;
 			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
 		}
 		mutex_unlock(&task->perf_event_mutex);
 
 		if (unlikely(err)) {
-			put_task_struct(task);
-			kfree(ctx);
+			put_ctx(ctx);
 
 			if (err == -EAGAIN)
 				goto retry;
@@ -2934,12 +2924,6 @@ int perf_event_release_kernel(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
 
-	/*
-	 * Remove from the PMU, can't get re-enabled since we got
-	 * here because the last ref went.
-	 */
-	perf_event_disable(event);
-
 	WARN_ON_ONCE(ctx->parent_ctx);
 	/*
 	 * There are two ways this annotation is useful:
@@ -2956,8 +2940,8 @@ int perf_event_release_kernel(struct perf_event *event)
 	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
 	raw_spin_lock_irq(&ctx->lock);
 	perf_group_detach(event);
-	list_del_event(event, ctx);
 	raw_spin_unlock_irq(&ctx->lock);
+	perf_remove_from_context(event);
 	mutex_unlock(&ctx->mutex);
 
 	free_event(event);
@@ -5986,6 +5970,7 @@ free_dev:
 }
 
 static struct lock_class_key cpuctx_mutex;
+static struct lock_class_key cpuctx_lock;
 
 int perf_pmu_register(struct pmu *pmu, char *name, int type)
 {
@@ -6036,6 +6021,7 @@ skip_type:
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 		__perf_event_init_context(&cpuctx->ctx);
 		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
+		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
 		cpuctx->ctx.type = cpu_context;
 		cpuctx->ctx.pmu = pmu;
 		cpuctx->jiffies_interval = 1;
@@ -6780,7 +6766,6 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * our context.
 	 */
 	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
-	task_ctx_sched_out(child_ctx, EVENT_ALL);
 
 	/*
 	 * Take the context lock here so that if find_get_context is
@@ -6788,6 +6773,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
 	raw_spin_lock(&child_ctx->lock);
+	task_ctx_sched_out(child_ctx);
 	child->perf_event_ctxp[ctxn] = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
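
The core of the kernel change is that ctx->lock is no longer taken and dropped inside each helper; instead perf_ctx_lock()/perf_ctx_unlock() take the per-CPU context lock first and the optional task context lock second, and the scheduling helpers run under that pairing. A minimal standalone sketch of that nesting discipline, modeled with pthread mutexes rather than raw spinlocks (the struct and function names here are stand-ins, not kernel API):

	/* Model of the cpuctx-before-task-ctx lock nesting; build with -pthread. */
	#include <pthread.h>
	#include <stdio.h>

	struct ctx { pthread_mutex_t lock; };
	struct cpu_ctx { struct ctx ctx; };

	static void ctx_lock(struct cpu_ctx *cpuctx, struct ctx *task_ctx)
	{
		pthread_mutex_lock(&cpuctx->ctx.lock);		/* outer lock, always */
		if (task_ctx)
			pthread_mutex_lock(&task_ctx->lock);	/* inner lock, optional */
	}

	static void ctx_unlock(struct cpu_ctx *cpuctx, struct ctx *task_ctx)
	{
		if (task_ctx)
			pthread_mutex_unlock(&task_ctx->lock);	/* release in reverse order */
		pthread_mutex_unlock(&cpuctx->ctx.lock);
	}

	int main(void)
	{
		struct cpu_ctx cpuctx = { .ctx = { PTHREAD_MUTEX_INITIALIZER } };
		struct ctx task_ctx = { PTHREAD_MUTEX_INITIALIZER };

		ctx_lock(&cpuctx, &task_ctx);
		puts("both contexts held; event scheduling would happen here");
		ctx_unlock(&cpuctx, &task_ctx);
		return 0;
	}

Because every path nests the two locks the same way, a single extra lockdep class for cpuctx->ctx.lock (registered in perf_pmu_register() above) is enough to validate the ordering.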
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 86c87e214b11..c6068cb43f57 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -115,10 +115,10 @@ OPTIONS
 -f::
 --fields::
         Comma separated list of fields to print. Options are:
-        comm, tid, pid, time, cpu, event, trace, sym. Field
-        list can be prepended with the type, trace, sw or hw,
+        comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr.
+        Field list can be prepended with the type, trace, sw or hw,
         to indicate to which event type the field list applies.
-        e.g., -f sw:comm,tid,time,sym and -f trace:time,cpu,trace
+        e.g., -f sw:comm,tid,time,ip,sym and -f trace:time,cpu,trace
 
 	perf script -f <fields>
 
@@ -132,17 +132,17 @@ OPTIONS
 	The arguments are processed in the order received. A later usage can
 	reset a prior request. e.g.:
 
-	    -f trace: -f comm,tid,time,sym
+	    -f trace: -f comm,tid,time,ip,sym
 
 	The first -f suppresses trace events (field list is ""), but then the
-	second invocation sets the fields to comm,tid,time,sym. In this case a
+	second invocation sets the fields to comm,tid,time,ip,sym. In this case a
 	warning is given to the user:
 
 	"Overriding previous field request for all events."
 
 	Alternatively, consider the order:
 
-	    -f comm,tid,time,sym -f trace:
+	    -f comm,tid,time,ip,sym -f trace:
 
 	The first -f sets the fields for all events and the second -f
 	suppresses trace events. The user is given a warning message about
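
With the documentation change above, a hypothetical session exercising the new fields could look like this (commands only; page faults are chosen because they are one of the software events whose sample address can be resolved to a symbol, per the builtin-script.c change below):

	perf record -e page-faults -a sleep 5
	perf script -f comm,tid,time,ip,sym,dso,addr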
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 22747de7234b..3056b45b3dd6 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -30,7 +30,10 @@ enum perf_output_field {
 	PERF_OUTPUT_CPU = 1U << 4,
 	PERF_OUTPUT_EVNAME = 1U << 5,
 	PERF_OUTPUT_TRACE = 1U << 6,
-	PERF_OUTPUT_SYM = 1U << 7,
+	PERF_OUTPUT_IP = 1U << 7,
+	PERF_OUTPUT_SYM = 1U << 8,
+	PERF_OUTPUT_DSO = 1U << 9,
+	PERF_OUTPUT_ADDR = 1U << 10,
 };
 
 struct output_option {
@@ -44,7 +47,10 @@ struct output_option {
44 {.str = "cpu", .field = PERF_OUTPUT_CPU}, 47 {.str = "cpu", .field = PERF_OUTPUT_CPU},
45 {.str = "event", .field = PERF_OUTPUT_EVNAME}, 48 {.str = "event", .field = PERF_OUTPUT_EVNAME},
46 {.str = "trace", .field = PERF_OUTPUT_TRACE}, 49 {.str = "trace", .field = PERF_OUTPUT_TRACE},
50 {.str = "ip", .field = PERF_OUTPUT_IP},
47 {.str = "sym", .field = PERF_OUTPUT_SYM}, 51 {.str = "sym", .field = PERF_OUTPUT_SYM},
52 {.str = "dso", .field = PERF_OUTPUT_DSO},
53 {.str = "addr", .field = PERF_OUTPUT_ADDR},
48}; 54};
49 55
50/* default set to maintain compatibility with current format */ 56/* default set to maintain compatibility with current format */
@@ -60,7 +66,8 @@ static struct {
 
 		.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
 			  PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
-			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM,
+			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+			  PERF_OUTPUT_SYM | PERF_OUTPUT_DSO,
 
 		.invalid_fields = PERF_OUTPUT_TRACE,
 	},
@@ -70,7 +77,8 @@ static struct {
 
 		.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
 			  PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
-			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM,
+			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+			  PERF_OUTPUT_SYM | PERF_OUTPUT_DSO,
 
 		.invalid_fields = PERF_OUTPUT_TRACE,
 	},
@@ -88,7 +96,8 @@ static struct {
 
 		.fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
 			  PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
-			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_SYM,
+			  PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+			  PERF_OUTPUT_SYM | PERF_OUTPUT_DSO,
 
 		.invalid_fields = PERF_OUTPUT_TRACE,
 	},
@@ -157,9 +166,9 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
 	    !perf_session__has_traces(session, "record -R"))
 		return -EINVAL;
 
-	if (PRINT_FIELD(SYM)) {
+	if (PRINT_FIELD(IP)) {
 		if (perf_event_attr__check_stype(attr, PERF_SAMPLE_IP, "IP",
-						 PERF_OUTPUT_SYM))
+						 PERF_OUTPUT_IP))
 			return -EINVAL;
 
 		if (!no_callchain &&
@@ -167,6 +176,24 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
 			symbol_conf.use_callchain = false;
 	}
 
+	if (PRINT_FIELD(ADDR) &&
+	    perf_event_attr__check_stype(attr, PERF_SAMPLE_ADDR, "ADDR",
+					 PERF_OUTPUT_ADDR))
+		return -EINVAL;
+
+	if (PRINT_FIELD(SYM) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) {
+		pr_err("Display of symbols requested but neither sample IP nor "
+		       "sample address\nis selected. Hence, no addresses to convert "
+		       "to symbols.\n");
+		return -EINVAL;
+	}
+	if (PRINT_FIELD(DSO) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) {
+		pr_err("Display of DSO requested but neither sample IP nor "
+		       "sample address\nis selected. Hence, no addresses to convert "
+		       "to DSO.\n");
+		return -EINVAL;
+	}
+
 	if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
 	    perf_event_attr__check_stype(attr, PERF_SAMPLE_TID, "TID",
 					 PERF_OUTPUT_TID|PERF_OUTPUT_PID))
@@ -230,7 +257,7 @@ static void print_sample_start(struct perf_sample *sample,
 	if (PRINT_FIELD(COMM)) {
 		if (latency_format)
 			printf("%8.8s ", thread->comm);
-		else if (PRINT_FIELD(SYM) && symbol_conf.use_callchain)
+		else if (PRINT_FIELD(IP) && symbol_conf.use_callchain)
 			printf("%s ", thread->comm);
 		else
 			printf("%16s ", thread->comm);
@@ -271,6 +298,63 @@ static void print_sample_start(struct perf_sample *sample,
 	}
 }
 
+static bool sample_addr_correlates_sym(struct perf_event_attr *attr)
+{
+	if ((attr->type == PERF_TYPE_SOFTWARE) &&
+	    ((attr->config == PERF_COUNT_SW_PAGE_FAULTS) ||
+	     (attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN) ||
+	     (attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)))
+		return true;
+
+	return false;
+}
+
+static void print_sample_addr(union perf_event *event,
+			      struct perf_sample *sample,
+			      struct perf_session *session,
+			      struct thread *thread,
+			      struct perf_event_attr *attr)
+{
+	struct addr_location al;
+	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+	const char *symname, *dsoname;
+
+	printf("%16" PRIx64, sample->addr);
+
+	if (!sample_addr_correlates_sym(attr))
+		return;
+
+	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
+			      event->ip.pid, sample->addr, &al);
+	if (!al.map)
+		thread__find_addr_map(thread, session, cpumode, MAP__VARIABLE,
+				      event->ip.pid, sample->addr, &al);
+
+	al.cpu = sample->cpu;
+	al.sym = NULL;
+
+	if (al.map)
+		al.sym = map__find_symbol(al.map, al.addr, NULL);
+
+	if (PRINT_FIELD(SYM)) {
+		if (al.sym && al.sym->name)
+			symname = al.sym->name;
+		else
+			symname = "";
+
+		printf(" %16s", symname);
+	}
+
+	if (PRINT_FIELD(DSO)) {
+		if (al.map && al.map->dso && al.map->dso->name)
+			dsoname = al.map->dso->name;
+		else
+			dsoname = "";
+
+		printf(" (%s)", dsoname);
+	}
+}
+
 static void process_event(union perf_event *event __unused,
 			  struct perf_sample *sample,
 			  struct perf_evsel *evsel,
@@ -288,12 +372,16 @@ static void process_event(union perf_event *event __unused,
 		print_trace_event(sample->cpu, sample->raw_data,
 				  sample->raw_size);
 
-	if (PRINT_FIELD(SYM)) {
+	if (PRINT_FIELD(ADDR))
+		print_sample_addr(event, sample, session, thread, attr);
+
+	if (PRINT_FIELD(IP)) {
 		if (!symbol_conf.use_callchain)
 			printf(" ");
 		else
 			printf("\n");
-		perf_session__print_symbols(event, sample, session);
+		perf_session__print_ip(event, sample, session,
+				       PRINT_FIELD(SYM), PRINT_FIELD(DSO));
 	}
 
 	printf("\n");
@@ -985,7 +1073,7 @@ static const struct option options[] = {
 	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
 		   "Look for files with symbols relative to this directory"),
 	OPT_CALLBACK('f', "fields", NULL, "str",
-		     "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,sym",
+		     "comma separated output fields prepend with 'type:'. Valid types: hw,sw,trace,raw. Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,addr",
 		     parse_output_fields),
 
 	OPT_END()
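
The output fields above are plain bits in a mask, so the new consistency checks in perf_evsel__check_attr() reduce to mask tests: sym or dso without an address source (ip or addr) has nothing to resolve. A minimal standalone model of that test (enum values copied from the patch; everything else here is illustrative):

	#include <stdio.h>

	enum perf_output_field {
		PERF_OUTPUT_IP   = 1U << 7,
		PERF_OUTPUT_SYM  = 1U << 8,
		PERF_OUTPUT_DSO  = 1U << 9,
		PERF_OUTPUT_ADDR = 1U << 10,
	};

	int main(void)
	{
		unsigned int fields = PERF_OUTPUT_SYM | PERF_OUTPUT_DSO;

		/* the check added in perf_evsel__check_attr(): symbols and
		 * DSOs need an address (ip or addr) to resolve against */
		if ((fields & (PERF_OUTPUT_SYM | PERF_OUTPUT_DSO)) &&
		    !(fields & (PERF_OUTPUT_IP | PERF_OUTPUT_ADDR))) {
			fprintf(stderr, "sym/dso requested without ip or addr\n");
			return 1;
		}
		return 0;
	}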
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index a9f06715e44d..784ed6d6e0d6 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -61,6 +61,8 @@
 #include <locale.h>
 
 #define DEFAULT_SEPARATOR	" "
+#define CNTR_NOT_SUPPORTED	"<not supported>"
+#define CNTR_NOT_COUNTED	"<not counted>"
 
 static struct perf_event_attr default_attrs[] = {
 
@@ -448,6 +450,7 @@ static int run_perf_stat(int argc __used, const char **argv)
 				if (verbose)
 					ui__warning("%s event is not supported by the kernel.\n",
 						    event_name(counter));
+				counter->supported = false;
 				continue;
 			}
 
@@ -466,6 +469,7 @@ static int run_perf_stat(int argc __used, const char **argv)
 				die("Not all events could be opened.\n");
 			return -1;
 		}
+		counter->supported = true;
 	}
 
 	if (perf_evlist__set_filters(evsel_list)) {
@@ -861,7 +865,7 @@ static void print_counter_aggr(struct perf_evsel *counter)
 	if (scaled == -1) {
 		fprintf(stderr, "%*s%s%*s",
 			csv_output ? 0 : 18,
-			"<not counted>",
+			counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
 			csv_sep,
 			csv_output ? 0 : -24,
 			event_name(counter));
@@ -914,7 +918,8 @@ static void print_counter(struct perf_evsel *counter)
 			csv_output ? 0 : -4,
 			evsel_list->cpus->map[cpu], csv_sep,
 			csv_output ? 0 : 18,
-			"<not counted>", csv_sep,
+			counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
+			csv_sep,
 			csv_output ? 0 : -24,
 			event_name(counter));
 
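
The label printed for an unreadable counter now depends on the supported flag set when the counter was created: a counter that failed to open reports <not supported>, while one that opened but never ran reports <not counted>. A standalone sketch of that selection (print_unreadable() is a made-up helper; the macros match the patch):

	#include <stdio.h>
	#include <stdbool.h>

	#define CNTR_NOT_SUPPORTED	"<not supported>"
	#define CNTR_NOT_COUNTED	"<not counted>"

	static void print_unreadable(bool supported, const char *name)
	{
		printf("%18s  %s\n",
		       supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, name);
	}

	int main(void)
	{
		print_unreadable(true, "cycles");		/* opened, but scaled == -1 */
		print_unreadable(false, "some-hw-event");	/* open failed: hypothetical name */
		return 0;
	}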
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 0239eb87b232..a03a36b7908a 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -377,6 +377,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
 		array++;
 	}
 
+	data->addr = 0;
 	if (type & PERF_SAMPLE_ADDR) {
 		data->addr = *array;
 		array++;
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 7e9366e4490b..e9a31554e265 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -61,6 +61,7 @@ struct perf_evsel {
 		off_t id_offset;
 	};
 	struct cgroup_sel *cgrp;
+	bool supported;
 };
 
 struct cpu_map;
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index a9ac0504aabd..8e0b5a39d8a7 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -247,7 +247,7 @@ struct pyrf_cpu_map {
 static int pyrf_cpu_map__init(struct pyrf_cpu_map *pcpus,
 			      PyObject *args, PyObject *kwargs)
 {
-	static char *kwlist[] = { "cpustr", NULL, NULL, };
+	static char *kwlist[] = { "cpustr", NULL };
 	char *cpustr = NULL;
 
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|s",
@@ -316,7 +316,7 @@ struct pyrf_thread_map {
 static int pyrf_thread_map__init(struct pyrf_thread_map *pthreads,
 				 PyObject *args, PyObject *kwargs)
 {
-	static char *kwlist[] = { "pid", "tid", NULL, NULL, };
+	static char *kwlist[] = { "pid", "tid", NULL };
 	int pid = -1, tid = -1;
 
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii",
@@ -418,7 +418,9 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
418 "wakeup_events", 418 "wakeup_events",
419 "bp_type", 419 "bp_type",
420 "bp_addr", 420 "bp_addr",
421 "bp_len", NULL, NULL, }; 421 "bp_len",
422 NULL
423 };
422 u64 sample_period = 0; 424 u64 sample_period = 0;
423 u32 disabled = 0, 425 u32 disabled = 0,
424 inherit = 0, 426 inherit = 0,
@@ -499,7 +501,7 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel,
 	struct thread_map *threads = NULL;
 	PyObject *pcpus = NULL, *pthreads = NULL;
 	int group = 0, inherit = 0;
-	static char *kwlist[] = {"cpus", "threads", "group", "inherit", NULL, NULL};
+	static char *kwlist[] = { "cpus", "threads", "group", "inherit", NULL };
 
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist,
 					 &pcpus, &pthreads, &group, &inherit))
@@ -582,8 +584,7 @@ static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
 				   PyObject *args, PyObject *kwargs)
 {
 	struct perf_evlist *evlist = &pevlist->evlist;
-	static char *kwlist[] = {"pages", "overwrite",
-				  NULL, NULL};
+	static char *kwlist[] = { "pages", "overwrite", NULL };
 	int pages = 128, overwrite = false;
 
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
@@ -603,7 +604,7 @@ static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist,
 				  PyObject *args, PyObject *kwargs)
 {
 	struct perf_evlist *evlist = &pevlist->evlist;
-	static char *kwlist[] = {"timeout", NULL, NULL};
+	static char *kwlist[] = { "timeout", NULL };
 	int timeout = -1, n;
 
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &timeout))
@@ -674,7 +675,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
 	struct perf_evlist *evlist = &pevlist->evlist;
 	union perf_event *event;
 	int sample_id_all = 1, cpu;
-	static char *kwlist[] = {"cpu", "sample_id_all", NULL, NULL};
+	static char *kwlist[] = { "cpu", "sample_id_all", NULL };
 	int err;
 
 	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
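
All of the python.c hunks fix the same pattern: PyArg_ParseTupleAndKeywords() scans kwlist until it hits a NULL sentinel, so exactly one terminating NULL is needed and the trailing second NULL (and comma) were dead entries. A hypothetical minimal binding showing the corrected shape (demo_func and the "count" keyword are made up for illustration):

	#include <Python.h>

	static PyObject *demo_func(PyObject *self, PyObject *args, PyObject *kwargs)
	{
		static char *kwlist[] = { "count", NULL };	/* single NULL terminator */
		int count = 0;

		if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &count))
			return NULL;

		return PyLong_FromLong(2 * count);
	}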
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index f5a8fbdd3f76..b723f211881c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -708,9 +708,9 @@ static void dump_sample(struct perf_session *session, union perf_event *event,
 	if (!dump_trace)
 		return;
 
-	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n",
+	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
 	       event->header.misc, sample->pid, sample->tid, sample->ip,
-	       sample->period);
+	       sample->period, sample->addr);
 
 	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
 		callchain__printf(sample);
@@ -1202,9 +1202,10 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
 	return NULL;
 }
 
-void perf_session__print_symbols(union perf_event *event,
-				 struct perf_sample *sample,
-				 struct perf_session *session)
+void perf_session__print_ip(union perf_event *event,
+			    struct perf_sample *sample,
+			    struct perf_session *session,
+			    int print_sym, int print_dso)
 {
 	struct addr_location al;
 	const char *symname, *dsoname;
@@ -1233,32 +1234,46 @@ void perf_session__print_symbols(union perf_event *event,
 			if (!node)
 				break;
 
-			if (node->sym && node->sym->name)
-				symname = node->sym->name;
-			else
-				symname = "";
+			printf("\t%16" PRIx64, node->ip);
+			if (print_sym) {
+				if (node->sym && node->sym->name)
+					symname = node->sym->name;
+				else
+					symname = "";
 
-			if (node->map && node->map->dso && node->map->dso->name)
-				dsoname = node->map->dso->name;
-			else
-				dsoname = "";
+				printf(" %s", symname);
+			}
+			if (print_dso) {
+				if (node->map && node->map->dso && node->map->dso->name)
+					dsoname = node->map->dso->name;
+				else
+					dsoname = "";
 
-			printf("\t%16" PRIx64 " %s (%s)\n", node->ip, symname, dsoname);
+				printf(" (%s)", dsoname);
+			}
+			printf("\n");
 
 			callchain_cursor_advance(cursor);
 		}
 
 	} else {
-		if (al.sym && al.sym->name)
-			symname = al.sym->name;
-		else
-			symname = "";
+		printf("%16" PRIx64, al.addr);
+		if (print_sym) {
+			if (al.sym && al.sym->name)
+				symname = al.sym->name;
+			else
+				symname = "";
 
-		if (al.map && al.map->dso && al.map->dso->name)
-			dsoname = al.map->dso->name;
-		else
-			dsoname = "";
+			printf(" %s", symname);
+		}
 
-		printf("%16" PRIx64 " %s (%s)", al.addr, symname, dsoname);
+		if (print_dso) {
+			if (al.map && al.map->dso && al.map->dso->name)
+				dsoname = al.map->dso->name;
+			else
+				dsoname = "";
+
+			printf(" (%s)", dsoname);
+		}
 	}
 }
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 66d4e1490879..de4178d7bb7b 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -167,8 +167,9 @@ static inline int perf_session__parse_sample(struct perf_session *session,
 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
 					    unsigned int type);
 
-void perf_session__print_symbols(union perf_event *event,
-				 struct perf_sample *sample,
-				 struct perf_session *session);
+void perf_session__print_ip(union perf_event *event,
+			    struct perf_sample *sample,
+			    struct perf_session *session,
+			    int print_sym, int print_dso);
 
 #endif /* __PERF_SESSION_H */