Diffstat (limited to 'tools/perf/util/evsel.c')
-rw-r--r--  tools/perf/util/evsel.c | 240
1 file changed, 173 insertions(+), 67 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index d5fbcf8c7aa7..ff359c9ece2e 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -36,6 +36,7 @@
 #include "debug.h"
 #include "trace-event.h"
 #include "stat.h"
+#include "memswap.h"
 #include "util/parse-branch-options.h"
 
 #include "sane_ctype.h"
@@ -650,9 +651,9 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
 	return ret;
 }
 
-void perf_evsel__config_callchain(struct perf_evsel *evsel,
-				  struct record_opts *opts,
-				  struct callchain_param *param)
+static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
+					   struct record_opts *opts,
+					   struct callchain_param *param)
 {
 	bool function = perf_evsel__is_function_event(evsel);
 	struct perf_event_attr *attr = &evsel->attr;
@@ -698,6 +699,14 @@ void perf_evsel__config_callchain(struct perf_evsel *evsel,
 	}
 }
 
+void perf_evsel__config_callchain(struct perf_evsel *evsel,
+				  struct record_opts *opts,
+				  struct callchain_param *param)
+{
+	if (param->enabled)
+		return __perf_evsel__config_callchain(evsel, opts, param);
+}
+
 static void
 perf_evsel__reset_callgraph(struct perf_evsel *evsel,
 			    struct callchain_param *param)
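
The split above is a common guard-wrapper refactor: the worker keeps the old body, and the exported entry point reduces to a filter on param->enabled, so every caller gets the check for free. A minimal standalone sketch of the pattern, with illustrative names rather than the real perf types:

/* guard-wrapper sketch; names are illustrative, not the perf API */
#include <stdbool.h>
#include <stdio.h>

struct callchain_param {
	bool enabled;
	int  record_mode;
};

static void __config_callchain(struct callchain_param *param)
{
	/* the real work runs only for enabled parameters */
	printf("configuring callchain, record_mode=%d\n", param->record_mode);
}

static void config_callchain(struct callchain_param *param)
{
	if (param->enabled)
		__config_callchain(param);
}

int main(void)
{
	struct callchain_param off = { .enabled = false };
	struct callchain_param on  = { .enabled = true, .record_mode = 1 };

	config_callchain(&off);	/* silently ignored */
	config_callchain(&on);	/* does the work */
	return 0;
}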
@@ -717,31 +726,33 @@ perf_evsel__reset_callgraph(struct perf_evsel *evsel,
 }
 
 static void apply_config_terms(struct perf_evsel *evsel,
-			       struct record_opts *opts)
+			       struct record_opts *opts, bool track)
 {
 	struct perf_evsel_config_term *term;
 	struct list_head *config_terms = &evsel->config_terms;
 	struct perf_event_attr *attr = &evsel->attr;
-	struct callchain_param param;
+	/* callgraph default */
+	struct callchain_param param = {
+		.record_mode = callchain_param.record_mode,
+	};
 	u32 dump_size = 0;
 	int max_stack = 0;
 	const char *callgraph_buf = NULL;
 
-	/* callgraph default */
-	param.record_mode = callchain_param.record_mode;
-
 	list_for_each_entry(term, config_terms, list) {
 		switch (term->type) {
 		case PERF_EVSEL__CONFIG_TERM_PERIOD:
 			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
 				attr->sample_period = term->val.period;
 				attr->freq = 0;
+				perf_evsel__reset_sample_bit(evsel, PERIOD);
 			}
 			break;
 		case PERF_EVSEL__CONFIG_TERM_FREQ:
 			if (!(term->weak && opts->user_freq != UINT_MAX)) {
 				attr->sample_freq = term->val.freq;
 				attr->freq = 1;
+				perf_evsel__set_sample_bit(evsel, PERIOD);
 			}
 			break;
 		case PERF_EVSEL__CONFIG_TERM_TIME:
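
The two new sample-bit calls encode a rule: a fixed period= term makes PERF_SAMPLE_PERIOD redundant (every sample would carry the same value), while a freq= term makes it necessary, because in frequency mode the kernel varies the period from sample to sample. A hedged sketch of the effect on the attr, assuming the helpers reduce to sample_type bit flips (the real perf_evsel__{set,reset}_sample_bit() also keep evsel->sample_size in sync):

#include <linux/perf_event.h>

static void apply_period_term(struct perf_event_attr *attr, __u64 period)
{
	attr->sample_period = period;
	attr->freq = 0;
	attr->sample_type &= ~PERF_SAMPLE_PERIOD;	/* reset_sample_bit(PERIOD) */
}

static void apply_freq_term(struct perf_event_attr *attr, __u64 freq)
{
	attr->sample_freq = freq;
	attr->freq = 1;
	attr->sample_type |= PERF_SAMPLE_PERIOD;	/* set_sample_bit(PERIOD) */
}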
@@ -779,6 +790,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
 		case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
 			attr->write_backward = term->val.overwrite ? 1 : 0;
 			break;
+		case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
+			break;
 		default:
 			break;
 		}
@@ -786,6 +799,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
 
 	/* User explicitly set per-event callgraph, clear the old setting and reset. */
 	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
+		bool sample_address = false;
+
 		if (max_stack) {
 			param.max_stack = max_stack;
 			if (callgraph_buf == NULL)
@@ -805,6 +820,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
 					       evsel->name);
 				return;
 			}
+			if (param.record_mode == CALLCHAIN_DWARF)
+				sample_address = true;
 		}
 	}
 	if (dump_size > 0) {
@@ -817,8 +834,14 @@ static void apply_config_terms(struct perf_evsel *evsel,
 			perf_evsel__reset_callgraph(evsel, &callchain_param);
 
 		/* set perf-event callgraph */
-		if (param.enabled)
+		if (param.enabled) {
+			if (sample_address) {
+				perf_evsel__set_sample_bit(evsel, ADDR);
+				perf_evsel__set_sample_bit(evsel, DATA_SRC);
+				evsel->attr.mmap_data = track;
+			}
 			perf_evsel__config_callchain(evsel, opts, &param);
+		}
 	}
 }
 
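
The new branch only fires for DWARF call graphs: besides the callchain itself, the event then samples the data address and data source, and (when the evsel is a tracking event, hence the new track parameter) records data mmaps so those addresses can be resolved later. Reduced to a standalone sketch:

#include <linux/perf_event.h>
#include <stdbool.h>

/* extras enabled when a per-event term selects DWARF unwinding */
static void config_dwarf_extras(struct perf_event_attr *attr, bool track)
{
	attr->sample_type |= PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC;
	attr->mmap_data = track;	/* emit PERF_RECORD_MMAP for data mappings */
}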
@@ -948,9 +971,6 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
 	if (target__has_cpu(&opts->target) || opts->sample_cpu)
 		perf_evsel__set_sample_bit(evsel, CPU);
 
-	if (opts->period)
-		perf_evsel__set_sample_bit(evsel, PERIOD);
-
 	/*
 	 * When the user explicitly disabled time don't force it here.
 	 */
@@ -1049,9 +1069,17 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
 	 * Apply event specific term settings,
 	 * it overloads any global configuration.
 	 */
-	apply_config_terms(evsel, opts);
+	apply_config_terms(evsel, opts, track);
 
 	evsel->ignore_missing_thread = opts->ignore_missing_thread;
+
+	/* The --period option takes the precedence. */
+	if (opts->period_set) {
+		if (opts->period)
+			perf_evsel__set_sample_bit(evsel, PERIOD);
+		else
+			perf_evsel__reset_sample_bit(evsel, PERIOD);
+	}
 }
 
 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
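
Together with the PERIOD handling removed from the earlier block, this establishes a clear precedence: per-event terms are applied first in apply_config_terms(), and an explicit --period/--no-period on the command line (signalled by opts->period_set) overrides them afterwards. A simplified model of that final step, with a cut-down opts struct standing in for record_opts:

#include <linux/perf_event.h>
#include <stdbool.h>

struct opts {
	bool period_set;	/* user typed --period or --no-period */
	bool period;		/* which of the two it was */
};

static void finish_period_bit(struct perf_event_attr *attr, struct opts *o)
{
	if (!o->period_set)
		return;		/* keep whatever the event terms chose */
	if (o->period)
		attr->sample_type |= PERF_SAMPLE_PERIOD;
	else
		attr->sample_type &= ~PERF_SAMPLE_PERIOD;
}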
@@ -1574,6 +1602,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
 	PRINT_ATTRf(use_clockid, p_unsigned);
 	PRINT_ATTRf(context_switch, p_unsigned);
 	PRINT_ATTRf(write_backward, p_unsigned);
+	PRINT_ATTRf(namespaces, p_unsigned);
 
 	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
 	PRINT_ATTRf(bp_type, p_unsigned);
@@ -1596,10 +1625,46 @@ static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
 	return fprintf(fp, " %-32s %s\n", name, val);
 }
 
+static void perf_evsel__remove_fd(struct perf_evsel *pos,
+				  int nr_cpus, int nr_threads,
+				  int thread_idx)
+{
+	for (int cpu = 0; cpu < nr_cpus; cpu++)
+		for (int thread = thread_idx; thread < nr_threads - 1; thread++)
+			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
+}
+
+static int update_fds(struct perf_evsel *evsel,
+		      int nr_cpus, int cpu_idx,
+		      int nr_threads, int thread_idx)
+{
+	struct perf_evsel *pos;
+
+	if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
+		return -EINVAL;
+
+	evlist__for_each_entry(evsel->evlist, pos) {
+		nr_cpus = pos != evsel ? nr_cpus : cpu_idx;
+
+		perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
+
+		/*
+		 * Since fds for next evsel has not been created,
+		 * there is no need to iterate whole event list.
+		 */
+		if (pos == evsel)
+			break;
+	}
+	return 0;
+}
+
 static bool ignore_missing_thread(struct perf_evsel *evsel,
+				  int nr_cpus, int cpu,
 				  struct thread_map *threads,
 				  int thread, int err)
 {
+	pid_t ignore_pid = thread_map__pid(threads, thread);
+
 	if (!evsel->ignore_missing_thread)
 		return false;
 
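
perf_evsel__remove_fd() treats each evsel's open fds as a cpu x thread matrix and closes the gap left by a vanished thread by shifting the following columns one slot left in every cpu row. A self-contained toy version of the same compaction, with plain arrays standing in for the xyarray-backed FD() macro:

#include <stdio.h>

#define NCPUS    2
#define NTHREADS 4

static void remove_thread_column(int fd[NCPUS][NTHREADS], int thread_idx)
{
	for (int cpu = 0; cpu < NCPUS; cpu++)
		for (int t = thread_idx; t < NTHREADS - 1; t++)
			fd[cpu][t] = fd[cpu][t + 1];	/* shift left over the gap */
}

int main(void)
{
	int fd[NCPUS][NTHREADS] = { { 10, 11, 12, 13 }, { 20, 21, 22, 23 } };

	remove_thread_column(fd, 1);	/* thread 1 went away */
	printf("%d %d %d\n", fd[0][0], fd[0][1], fd[0][2]);	/* 10 12 13 */
	return 0;
}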
@@ -1615,11 +1680,18 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
 	if (threads->nr == 1)
 		return false;
 
+	/*
+	 * We should remove fd for missing_thread first
+	 * because thread_map__remove() will decrease threads->nr.
+	 */
+	if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
+		return false;
+
 	if (thread_map__remove(threads, thread))
 		return false;
 
 	pr_warning("WARNING: Ignored open failure for pid %d\n",
-		   thread_map__pid(threads, thread));
+		   ignore_pid);
 	return true;
 }
 
@@ -1724,7 +1796,7 @@ retry_open:
 			if (fd < 0) {
 				err = -errno;
 
-				if (ignore_missing_thread(evsel, threads, thread, err)) {
+				if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
 					/*
 					 * We just removed 1 thread, so take a step
 					 * back on thread index and lower the upper
@@ -1960,6 +2032,20 @@ static inline bool overflow(const void *endp, u16 max_size, const void *offset,
 #define OVERFLOW_CHECK_u64(offset) \
 	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
 
+static int
+perf_event__check_size(union perf_event *event, unsigned int sample_size)
+{
+	/*
+	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
+	 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
+	 * check the format does not go past the end of the event.
+	 */
+	if (sample_size + sizeof(event->header) > event->header.size)
+		return -EFAULT;
+
+	return 0;
+}
+
 int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 			     struct perf_sample *data)
 {
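
What the new helper checks, spelled out: sample_size covers only the fixed-width fields (those in PERF_SAMPLE_MASK, up to PERF_SAMPLE_PERIOD), so this is a cheap up-front bound; variable-length fields such as callchains and raw data are still validated one by one with OVERFLOW_CHECK(). Roughly, as a layout comment:

/*
 * |<- event->header ->|<- sample_size fixed fields ->|<- variable ... ->|
 * |<-------------------- event->header.size ------------------------->|
 *
 * perf_event__check_size() rejects the record when the first two
 * segments alone already exceed header.size.
 */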
@@ -1981,6 +2067,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 	data->stream_id = data->id = data->time = -1ULL;
 	data->period = evsel->attr.sample_period;
 	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+	data->misc = event->header.misc;
+	data->id = -1ULL;
+	data->data_src = PERF_MEM_DATA_SRC_NONE;
 
 	if (event->header.type != PERF_RECORD_SAMPLE) {
 		if (!evsel->attr.sample_id_all)
@@ -1990,15 +2079,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 
 	array = event->sample.array;
 
-	/*
-	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
-	 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
-	 * check the format does not go past the end of the event.
-	 */
-	if (evsel->sample_size + sizeof(event->header) > event->header.size)
+	if (perf_event__check_size(event, evsel->sample_size))
 		return -EFAULT;
 
-	data->id = -1ULL;
 	if (type & PERF_SAMPLE_IDENTIFIER) {
 		data->id = *array;
 		array++;
@@ -2028,7 +2111,6 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 		array++;
 	}
 
-	data->addr = 0;
 	if (type & PERF_SAMPLE_ADDR) {
 		data->addr = *array;
 		array++;
@@ -2120,14 +2202,27 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 	if (type & PERF_SAMPLE_RAW) {
 		OVERFLOW_CHECK_u64(array);
 		u.val64 = *array;
-		if (WARN_ONCE(swapped,
-			      "Endianness of raw data not corrected!\n")) {
-			/* undo swap of u64, then swap on individual u32s */
+
+		/*
+		 * Undo swap of u64, then swap on individual u32s,
+		 * get the size of the raw area and undo all of the
+		 * swap. The pevent interface handles endianity by
+		 * itself.
+		 */
+		if (swapped) {
 			u.val64 = bswap_64(u.val64);
 			u.val32[0] = bswap_32(u.val32[0]);
 			u.val32[1] = bswap_32(u.val32[1]);
 		}
 		data->raw_size = u.val32[0];
+
+		/*
+		 * The raw data is aligned on 64bits including the
+		 * u32 size, so it's safe to use mem_bswap_64.
+		 */
+		if (swapped)
+			mem_bswap_64((void *) array, data->raw_size);
+
 		array = (void *)array + sizeof(u32);
 
 		OVERFLOW_CHECK(array, data->raw_size, max_size);
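
mem_bswap_64() (hence the new memswap.h include at the top of the file) byte-swaps a memory range in u64 chunks; the comment notes why that is safe here: the raw area, including its leading u32 size, is padded to 64-bit alignment. A sketch in the spirit of that helper, not its verbatim source:

#include <byteswap.h>
#include <stdint.h>

/* swap an aligned byte range in 8-byte chunks */
static void bswap_u64_range(void *start, int byte_size)
{
	uint64_t *m = start;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(*m);
		m++;
	}
}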
@@ -2192,14 +2287,12 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 		array++;
 	}
 
-	data->data_src = PERF_MEM_DATA_SRC_NONE;
 	if (type & PERF_SAMPLE_DATA_SRC) {
 		OVERFLOW_CHECK_u64(array);
 		data->data_src = *array;
 		array++;
 	}
 
-	data->transaction = 0;
 	if (type & PERF_SAMPLE_TRANSACTION) {
 		OVERFLOW_CHECK_u64(array);
 		data->transaction = *array;
@@ -2232,6 +2325,50 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 	return 0;
 }
 
+int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
+				       union perf_event *event,
+				       u64 *timestamp)
+{
+	u64 type = evsel->attr.sample_type;
+	const u64 *array;
+
+	if (!(type & PERF_SAMPLE_TIME))
+		return -1;
+
+	if (event->header.type != PERF_RECORD_SAMPLE) {
+		struct perf_sample data = {
+			.time = -1ULL,
+		};
+
+		if (!evsel->attr.sample_id_all)
+			return -1;
+		if (perf_evsel__parse_id_sample(evsel, event, &data))
+			return -1;
+
+		*timestamp = data.time;
+		return 0;
+	}
+
+	array = event->sample.array;
+
+	if (perf_event__check_size(event, evsel->sample_size))
+		return -EFAULT;
+
+	if (type & PERF_SAMPLE_IDENTIFIER)
+		array++;
+
+	if (type & PERF_SAMPLE_IP)
+		array++;
+
+	if (type & PERF_SAMPLE_TID)
+		array++;
+
+	if (type & PERF_SAMPLE_TIME)
+		*timestamp = *array;
+
+	return 0;
+}
+
 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
 				     u64 read_format)
 {
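
The fast path works because of the fixed field order in a sample body: IDENTIFIER, IP and TID are the only u64 slots that can precede TIME, so at most three increments reach the timestamp without parsing the rest of the record. Illustrative layout (a real record contains only the fields whose sample_type bits are set):

#include <stdint.h>

/* hypothetical fully-populated sample prefix, for illustration only */
struct sample_prefix {
	uint64_t identifier;	/* PERF_SAMPLE_IDENTIFIER */
	uint64_t ip;		/* PERF_SAMPLE_IP */
	uint32_t pid, tid;	/* PERF_SAMPLE_TID, packed into one u64 */
	uint64_t time;		/* PERF_SAMPLE_TIME <- what we want */
	/* ADDR, ID, STREAM_ID, CPU, PERIOD, ... follow */
};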
@@ -2342,8 +2479,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
 
 int perf_event__synthesize_sample(union perf_event *event, u64 type,
 				  u64 read_format,
-				  const struct perf_sample *sample,
-				  bool swapped)
+				  const struct perf_sample *sample)
 {
 	u64 *array;
 	size_t sz;
@@ -2368,15 +2504,6 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
 	if (type & PERF_SAMPLE_TID) {
 		u.val32[0] = sample->pid;
 		u.val32[1] = sample->tid;
-		if (swapped) {
-			/*
-			 * Inverse of what is done in perf_evsel__parse_sample
-			 */
-			u.val32[0] = bswap_32(u.val32[0]);
-			u.val32[1] = bswap_32(u.val32[1]);
-			u.val64 = bswap_64(u.val64);
-		}
-
 		*array = u.val64;
 		array++;
 	}
@@ -2403,13 +2530,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
 
 	if (type & PERF_SAMPLE_CPU) {
 		u.val32[0] = sample->cpu;
-		if (swapped) {
-			/*
-			 * Inverse of what is done in perf_evsel__parse_sample
-			 */
-			u.val32[0] = bswap_32(u.val32[0]);
-			u.val64 = bswap_64(u.val64);
-		}
+		u.val32[1] = 0;
 		*array = u.val64;
 		array++;
 	}
@@ -2456,15 +2577,6 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
 
 	if (type & PERF_SAMPLE_RAW) {
 		u.val32[0] = sample->raw_size;
-		if (WARN_ONCE(swapped,
-			      "Endianness of raw data not corrected!\n")) {
-			/*
-			 * Inverse of what is done in perf_evsel__parse_sample
-			 */
-			u.val32[0] = bswap_32(u.val32[0]);
-			u.val32[1] = bswap_32(u.val32[1]);
-			u.val64 = bswap_64(u.val64);
-		}
 		*array = u.val64;
 		array = (void *)array + sizeof(u32);
 
@@ -2743,8 +2855,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
 		break;
 	case EOPNOTSUPP:
 		if (evsel->attr.sample_period != 0)
-			return scnprintf(msg, size, "%s",
-	"PMU Hardware doesn't support sampling/overflow-interrupts.");
+			return scnprintf(msg, size,
+	"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
+					 perf_evsel__name(evsel));
 		if (evsel->attr.precise_ip)
 			return scnprintf(msg, size, "%s",
 "\'precise\' request may not be supported. Try removing 'p' modifier.");
@@ -2781,16 +2894,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
 			 perf_evsel__name(evsel));
 }
 
-char *perf_evsel__env_arch(struct perf_evsel *evsel)
-{
-	if (evsel && evsel->evlist && evsel->evlist->env)
-		return evsel->evlist->env->arch;
-	return NULL;
-}
-
-char *perf_evsel__env_cpuid(struct perf_evsel *evsel)
+struct perf_env *perf_evsel__env(struct perf_evsel *evsel)
 {
-	if (evsel && evsel->evlist && evsel->evlist->env)
-		return evsel->evlist->env->cpuid;
+	if (evsel && evsel->evlist)
+		return evsel->evlist->env;
 	return NULL;
 }