Diffstat (limited to 'tools/perf/util/evsel.c')
-rw-r--r--  tools/perf/util/evsel.c  43
1 file changed, 33 insertions, 10 deletions
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 0ce9febf1ba0..46dd4c2a41ce 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -168,7 +168,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
 	perf_evsel__calc_id_pos(evsel);
 }
 
-struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
+struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
 {
 	struct perf_evsel *evsel = zalloc(sizeof(*evsel));
 
@@ -219,7 +219,7 @@ out:
 	return format;
 }
 
-struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx)
+struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
 {
 	struct perf_evsel *evsel = zalloc(sizeof(*evsel));
 
@@ -645,7 +645,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
 		}
 	}
 
-	if (perf_target__has_cpu(&opts->target))
+	if (target__has_cpu(&opts->target) || opts->target.force_per_cpu)
 		perf_evsel__set_sample_bit(evsel, CPU);
 
 	if (opts->period)
@@ -653,7 +653,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
 
 	if (!perf_missing_features.sample_id_all &&
 	    (opts->sample_time || !opts->no_inherit ||
-	     perf_target__has_cpu(&opts->target)))
+	     target__has_cpu(&opts->target) || opts->target.force_per_cpu))
 		perf_evsel__set_sample_bit(evsel, TIME);
 
 	if (opts->raw_samples) {
@@ -663,7 +663,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
 	}
 
 	if (opts->sample_address)
-		attr->sample_type |= PERF_SAMPLE_DATA_SRC;
+		perf_evsel__set_sample_bit(evsel, DATA_SRC);
 
 	if (opts->no_delay) {
 		attr->watermark = 0;
@@ -675,12 +675,14 @@ void perf_evsel__config(struct perf_evsel *evsel,
 	}
 
 	if (opts->sample_weight)
-		attr->sample_type |= PERF_SAMPLE_WEIGHT;
+		perf_evsel__set_sample_bit(evsel, WEIGHT);
 
 	attr->mmap = track;
+	attr->mmap2 = track && !perf_missing_features.mmap2;
 	attr->comm = track;
 
+	if (opts->sample_transaction)
+		perf_evsel__set_sample_bit(evsel, TRANSACTION);
+
 	/*
 	 * XXX see the function comment above
 	 *
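
A note on the DATA_SRC and WEIGHT conversions above: perf_evsel__set_sample_bit() is not shown in this diff, but what it buys over the removed open-coded `attr->sample_type |=` assignments is that it also updates the evsel's cached bookkeeping. Paraphrased from memory of the same file (not part of this patch), the underlying helper is roughly:

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		/* keep the cached per-sample size and the sample-id
		 * offsets in sync with the attribute */
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

perf_evsel__set_sample_bit(evsel, WEIGHT) is the macro form, which pastes the short name into PERF_SAMPLE_##bit before calling the helper.
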
@@ -694,7 +696,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
 	 * Setting enable_on_exec for independent events and
 	 * group leaders for traced executed by perf.
 	 */
-	if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
+	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
 		attr->enable_on_exec = 1;
 }
 
@@ -983,6 +985,7 @@ static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
 	ret += PRINT_ATTR2(exclude_host, exclude_guest);
 	ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
 			    "excl.callchain_user", exclude_callchain_user);
+	ret += PRINT_ATTR_U32(mmap2);
 
 	ret += PRINT_ATTR_U32(wakeup_events);
 	ret += PRINT_ATTR_U32(wakeup_watermark);
@@ -1048,6 +1051,8 @@ retry_open:
 							     group_fd, flags);
 		if (FD(evsel, cpu, thread) < 0) {
 			err = -errno;
+			pr_debug2("perf_event_open failed, error %d\n",
+				  err);
 			goto try_fallback;
 		}
 		set_rlimit = NO_CHANGE;
@@ -1214,6 +1219,7 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
 
 		sample->pid = u.val32[0];
 		sample->tid = u.val32[1];
+		array--;
 	}
 
 	return 0;
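
For readers of the hunk above: the `array--` lands in perf_evsel__parse_id_sample(), which decodes the sample_id_all block that trails non-sample events. That block is written in forward order but parsed from the last u64 of the record backwards, with each `if (type & PERF_SAMPLE_...)` clause stepping the cursor back after reading its word; the TID clause was the only one that did not, which mattered little only because nothing is parsed after it. A toy model of that backward walk, with made-up bit names, where only the walking pattern matches the real parser:

#include <stdint.h>

/* Toy trailer parser: fields were appended in forward order
 * (TID, TIME, ID), so the reader starts at the last word and
 * walks backwards in reverse bit order. */
#define T_TID	(1ULL << 0)
#define T_TIME	(1ULL << 1)
#define T_ID	(1ULL << 2)

struct toy_id {
	uint64_t id, time, tid;
};

static void parse_toy_id_trailer(const uint64_t *last_word, uint64_t type,
				 struct toy_id *out)
{
	const uint64_t *array = last_word;

	if (type & T_ID) {		/* written last, read first */
		out->id = *array;
		array--;
	}
	if (type & T_TIME) {
		out->time = *array;
		array--;
	}
	if (type & T_TID) {
		out->tid = *array;
		array--;		/* the consistency decrement, as in the hunk above */
	}
}
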
@@ -1453,6 +1459,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 			array = (void *)array + sz;
 			OVERFLOW_CHECK_u64(array);
 			data->user_stack.size = *array++;
+			if (WARN_ONCE(data->user_stack.size > sz,
+				      "user stack dump failure\n"))
+				return -EFAULT;
 		}
 	}
 
@@ -1470,6 +1479,13 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 		array++;
 	}
 
+	data->transaction = 0;
+	if (type & PERF_SAMPLE_TRANSACTION) {
+		OVERFLOW_CHECK_u64(array);
+		data->transaction = *array;
+		array++;
+	}
+
 	return 0;
 }
 
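
To make the parsing hunks easier to follow: a perf sample record is a flat array of u64 words laid out in the order of the PERF_SAMPLE_* bits, so each `if (type & PERF_SAMPLE_...)` block consumes its word(s) and advances `array`, and the new TRANSACTION word is simply read after all the fields that precede it. A self-contained toy version of that walk, with hypothetical names, covering only fixed-size fields, and not the real perf_evsel__parse_sample():

#include <stdint.h>
#include <stdio.h>

/* Each enabled bit contributes one u64, in bit order, so later fields
 * such as the transaction word are reached by consuming everything
 * that comes before them. */
#define S_IP		(1ULL << 0)
#define S_TID		(1ULL << 1)
#define S_TIME		(1ULL << 2)
#define S_TRANSACTION	(1ULL << 3)	/* stands in for PERF_SAMPLE_TRANSACTION */

struct toy_sample {
	uint64_t ip, tid, time, transaction;
};

static int parse_toy_sample(const uint64_t *array, size_t words,
			    uint64_t type, struct toy_sample *out)
{
	const uint64_t *end = array + words;

	if (type & S_IP) {
		if (array >= end)
			return -1;	/* overflow check, as in the patch */
		out->ip = *array++;
	}
	if (type & S_TID) {
		if (array >= end)
			return -1;
		out->tid = *array++;
	}
	if (type & S_TIME) {
		if (array >= end)
			return -1;
		out->time = *array++;
	}
	out->transaction = 0;		/* default when the bit is not set */
	if (type & S_TRANSACTION) {
		if (array >= end)
			return -1;
		out->transaction = *array++;
	}
	return 0;
}

int main(void)
{
	uint64_t raw[] = { 0x400123, 1234, 0xabcdef, 0x42 };
	struct toy_sample s = { 0 };

	if (parse_toy_sample(raw, 4, S_IP | S_TID | S_TIME | S_TRANSACTION, &s))
		return 1;
	printf("ip=%#llx transaction=%#llx\n",
	       (unsigned long long)s.ip, (unsigned long long)s.transaction);
	return 0;
}
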
@@ -1562,6 +1578,9 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
 	if (type & PERF_SAMPLE_DATA_SRC)
 		result += sizeof(u64);
 
+	if (type & PERF_SAMPLE_TRANSACTION)
+		result += sizeof(u64);
+
 	return result;
 }
 
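
The size-accounting hunk is the bookkeeping twin of the parser: every additional fixed-width field, the TRANSACTION word included, costs exactly one u64 in the record. A hedged one-function illustration (hypothetical helper, fixed-size fields only; variable-length fields such as callchains, raw data and user stacks need their own arithmetic and are ignored here):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: count the enabled fixed-size sample bits and
 * charge sizeof(u64) for each, which is all the hunk above adds for
 * PERF_SAMPLE_TRANSACTION. */
static size_t fixed_sample_size(uint64_t type, uint64_t fixed_size_mask)
{
	return (size_t)__builtin_popcountll(type & fixed_size_mask) *
	       sizeof(uint64_t);
}
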
@@ -1735,6 +1754,11 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
 		array++;
 	}
 
+	if (type & PERF_SAMPLE_TRANSACTION) {
+		*array = sample->transaction;
+		array++;
+	}
+
 	return 0;
 }
 
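
perf_event__synthesize_sample() is the write-side inverse of perf_evsel__parse_sample(), so the new block appends the transaction word at exactly the position the parser consumes it. Continuing the toy sketch from the parsing hunk (same hypothetical bit names), the writer is the mirror image:

/* Mirror of parse_toy_sample() above: append one u64 per enabled bit,
 * in bit order, and report how many words were written. */
static size_t write_toy_sample(uint64_t *array, uint64_t type,
			       const struct toy_sample *s)
{
	uint64_t *start = array;

	if (type & S_IP)
		*array++ = s->ip;
	if (type & S_TID)
		*array++ = s->tid;
	if (type & S_TIME)
		*array++ = s->time;
	if (type & S_TRANSACTION)
		*array++ = s->transaction;

	return (size_t)(array - start);
}

The practical point is that reader and writer must be driven by the same sample_type mask; if they disagree about which bits are set, every field after the first mismatch is misread.
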
@@ -1982,8 +2006,7 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
 	return false;
 }
 
-int perf_evsel__open_strerror(struct perf_evsel *evsel,
-			      struct perf_target *target,
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
 			      int err, char *msg, size_t size)
 {
 	switch (err) {
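
The last hunk only adapts perf_evsel__open_strerror() to the struct perf_target to struct target rename; its job is unchanged. For orientation, the way the perf tools typically use this trio is: attempt perf_evsel__open(), let perf_evsel__fallback() rewrite the event and retry if it can, and only then turn the errno into a user-facing message. A rough, illustrative sketch of that flow; the glue function and its error handling are invented for the example, while the evsel and target APIs are the ones touched by this patch:

/* Illustrative glue, not a copy of any builtin-*.c: open the event,
 * give the fallback path a chance to adjust the attr, then report. */
static int try_open_evsel(struct perf_evsel *evsel, struct target *target,
			  struct cpu_map *cpus, struct thread_map *threads)
{
	char msg[512];
	int err;

retry:
	err = perf_evsel__open(evsel, cpus, threads);
	if (!err)
		return 0;

	/* perf_evsel__open() returns a negative errno; the helpers
	 * below expect the positive value. */
	if (perf_evsel__fallback(evsel, -err, msg, sizeof(msg))) {
		pr_debug("%s\n", msg);	/* e.g. fell back to a software event */
		goto retry;
	}

	perf_evsel__open_strerror(evsel, target, -err, msg, sizeof(msg));
	pr_err("%s\n", msg);
	return err;
}
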