Diffstat (limited to 'tools/perf/util/evsel.c')
 tools/perf/util/evsel.c | 227 ++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 163 insertions(+), 64 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index d5fbcf8c7aa7..66fa45198a11 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -36,6 +36,7 @@
 #include "debug.h"
 #include "trace-event.h"
 #include "stat.h"
+#include "memswap.h"
 #include "util/parse-branch-options.h"

 #include "sane_ctype.h"
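
Note: the new memswap.h include provides mem_bswap_64(), used further down to byte-swap the whole PERF_SAMPLE_RAW area in one pass. For reference, a sketch of that helper as defined in tools/perf/util/memswap.c:

	#include <byteswap.h>

	/* Swap every 64-bit word of a buffer in place. */
	void mem_bswap_64(void *src, int byte_size)
	{
		u64 *m = src;

		while (byte_size > 0) {
			*m = bswap_64(*m);
			byte_size -= sizeof(u64);
			++m;
		}
	}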
@@ -650,9 +651,9 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
 	return ret;
 }

-void perf_evsel__config_callchain(struct perf_evsel *evsel,
-				  struct record_opts *opts,
-				  struct callchain_param *param)
+static void __perf_evsel__config_callchain(struct perf_evsel *evsel,
+					   struct record_opts *opts,
+					   struct callchain_param *param)
 {
 	bool function = perf_evsel__is_function_event(evsel);
 	struct perf_event_attr *attr = &evsel->attr;
@@ -698,6 +699,14 @@ void perf_evsel__config_callchain(struct perf_evsel *evsel,
 	}
 }

+void perf_evsel__config_callchain(struct perf_evsel *evsel,
+				  struct record_opts *opts,
+				  struct callchain_param *param)
+{
+	if (param->enabled)
+		return __perf_evsel__config_callchain(evsel, opts, param);
+}
+
 static void
 perf_evsel__reset_callgraph(struct perf_evsel *evsel,
 			    struct callchain_param *param)
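
Note: the wrapper centralizes the enabled check, so a callchain_param with .enabled == false is now a no-op instead of being configured regardless. A minimal illustration (hypothetical caller):

	struct callchain_param param = { .enabled = false };

	/* Returns immediately; __perf_evsel__config_callchain() is never reached. */
	perf_evsel__config_callchain(evsel, opts, &param);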
@@ -717,19 +726,19 @@ perf_evsel__reset_callgraph(struct perf_evsel *evsel,
 }

 static void apply_config_terms(struct perf_evsel *evsel,
-			       struct record_opts *opts)
+			       struct record_opts *opts, bool track)
 {
 	struct perf_evsel_config_term *term;
 	struct list_head *config_terms = &evsel->config_terms;
 	struct perf_event_attr *attr = &evsel->attr;
-	struct callchain_param param;
+	/* callgraph default */
+	struct callchain_param param = {
+		.record_mode = callchain_param.record_mode,
+	};
 	u32 dump_size = 0;
 	int max_stack = 0;
 	const char *callgraph_buf = NULL;

-	/* callgraph default */
-	param.record_mode = callchain_param.record_mode;
-
 	list_for_each_entry(term, config_terms, list) {
 		switch (term->type) {
 		case PERF_EVSEL__CONFIG_TERM_PERIOD:
@@ -779,6 +788,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
 		case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
 			attr->write_backward = term->val.overwrite ? 1 : 0;
 			break;
+		case PERF_EVSEL__CONFIG_TERM_DRV_CFG:
+			break;
 		default:
 			break;
 		}
@@ -786,6 +797,8 @@ static void apply_config_terms(struct perf_evsel *evsel,

 	/* User explicitly set per-event callgraph, clear the old setting and reset. */
 	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
+		bool sample_address = false;
+
 		if (max_stack) {
 			param.max_stack = max_stack;
 			if (callgraph_buf == NULL)
@@ -805,6 +818,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
 					       evsel->name);
 					return;
 				}
+				if (param.record_mode == CALLCHAIN_DWARF)
+					sample_address = true;
 			}
 		}
 		if (dump_size > 0) {
@@ -817,8 +832,14 @@ static void apply_config_terms(struct perf_evsel *evsel,
 		perf_evsel__reset_callgraph(evsel, &callchain_param);

 		/* set perf-event callgraph */
-		if (param.enabled)
+		if (param.enabled) {
+			if (sample_address) {
+				perf_evsel__set_sample_bit(evsel, ADDR);
+				perf_evsel__set_sample_bit(evsel, DATA_SRC);
+				evsel->attr.mmap_data = track;
+			}
 			perf_evsel__config_callchain(evsel, opts, &param);
+		}
 	}
 }

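Note: taken together, these hunks make a per-event DWARF callgraph term also enable address and data-source sampling, plus mmap_data tracking when the event is a tracking one, since DWARF unwinding wants the sampled addresses resolvable. Schematically (the sample bits expand to attr->sample_type flags; this path is exercised by e.g. a 'call-graph=dwarf' per-event term on perf record):

	/* Net effect of a per-event "call-graph=dwarf" term after this change: */
	attr->sample_type |= PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC;
	attr->mmap_data    = track;
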
@@ -1049,7 +1070,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
 	 * Apply event specific term settings,
 	 * it overloads any global configuration.
 	 */
-	apply_config_terms(evsel, opts);
+	apply_config_terms(evsel, opts, track);

 	evsel->ignore_missing_thread = opts->ignore_missing_thread;
 }
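
Note: the new track argument is threaded in from perf_evsel__config(); assuming the unchanged surrounding code, it originates from the evsel's tracking flag, roughly:

	/* Sketch of where 'track' comes from (assumed context, not part of this diff): */
	void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
				struct callchain_param *callchain)
	{
		int track = evsel->tracking;

		/* ... existing attr setup ... */

		apply_config_terms(evsel, opts, track);
	}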
@@ -1574,6 +1595,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
 	PRINT_ATTRf(use_clockid, p_unsigned);
 	PRINT_ATTRf(context_switch, p_unsigned);
 	PRINT_ATTRf(write_backward, p_unsigned);
+	PRINT_ATTRf(namespaces, p_unsigned);

 	PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
 	PRINT_ATTRf(bp_type, p_unsigned);
@@ -1596,10 +1618,46 @@ static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
 	return fprintf(fp, "  %-32s %s\n", name, val);
 }

+static void perf_evsel__remove_fd(struct perf_evsel *pos,
+				  int nr_cpus, int nr_threads,
+				  int thread_idx)
+{
+	for (int cpu = 0; cpu < nr_cpus; cpu++)
+		for (int thread = thread_idx; thread < nr_threads - 1; thread++)
+			FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
+}
+
+static int update_fds(struct perf_evsel *evsel,
+		      int nr_cpus, int cpu_idx,
+		      int nr_threads, int thread_idx)
+{
+	struct perf_evsel *pos;
+
+	if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
+		return -EINVAL;
+
+	evlist__for_each_entry(evsel->evlist, pos) {
+		nr_cpus = pos != evsel ? nr_cpus : cpu_idx;
+
+		perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
+
+		/*
+		 * Since fds for next evsel has not been created,
+		 * there is no need to iterate whole event list.
+		 */
+		if (pos == evsel)
+			break;
+	}
+	return 0;
+}
+
 static bool ignore_missing_thread(struct perf_evsel *evsel,
+				  int nr_cpus, int cpu,
 				  struct thread_map *threads,
 				  int thread, int err)
 {
+	pid_t ignore_pid = thread_map__pid(threads, thread);
+
 	if (!evsel->ignore_missing_thread)
 		return false;

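Note: update_fds() relies on open order. Every evsel before the failing one already has fds for all threads on every cpu, while the failing evsel has them only for cpus before cpu_idx, hence nr_cpus is clamped to cpu_idx for that last evsel. The shift itself is plain array compaction; a standalone model (hypothetical, with FD() flattened to a 2-D array):

	/* Drop column thread_idx by shifting the later fds left by one. */
	static void remove_fd_column(int fd[][4], int nr_cpus, int nr_threads,
				     int thread_idx)
	{
		for (int cpu = 0; cpu < nr_cpus; cpu++)
			for (int thread = thread_idx; thread < nr_threads - 1; thread++)
				fd[cpu][thread] = fd[cpu][thread + 1];
	}
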
@@ -1615,11 +1673,18 @@ static bool ignore_missing_thread(struct perf_evsel *evsel,
 	if (threads->nr == 1)
 		return false;

+	/*
+	 * We should remove fd for missing_thread first
+	 * because thread_map__remove() will decrease threads->nr.
+	 */
+	if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
+		return false;
+
 	if (thread_map__remove(threads, thread))
 		return false;

 	pr_warning("WARNING: Ignored open failure for pid %d\n",
-		   thread_map__pid(threads, thread));
+		   ignore_pid);
 	return true;
 }

@@ -1724,7 +1789,7 @@ retry_open:
 	if (fd < 0) {
 		err = -errno;

-		if (ignore_missing_thread(evsel, threads, thread, err)) {
+		if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
 			/*
 			 * We just removed 1 thread, so take a step
 			 * back on thread index and lower the upper
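
Note: when ignore_missing_thread() succeeds, the open loop retries the same thread slot, which now holds the next thread after the removal. A sketch of the retry step this comment refers to (assuming the unchanged code just below this hunk):

	nthreads--;	/* lower the upper limit */
	thread--;	/* back up one slot; it now holds the next thread */
	err = 0;	/* clear the error and retry the open */
	continue;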
@@ -1960,6 +2025,20 @@ static inline bool overflow(const void *endp, u16 max_size, const void *offset,
 #define OVERFLOW_CHECK_u64(offset) \
 	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))

+static int
+perf_event__check_size(union perf_event *event, unsigned int sample_size)
+{
+	/*
+	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
+	 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
+	 * check the format does not go past the end of the event.
+	 */
+	if (sample_size + sizeof(event->header) > event->header.size)
+		return -EFAULT;
+
+	return 0;
+}
+
 int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 			     struct perf_sample *data)
 {
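
Note: a worked example of the check. With sample_type = IDENTIFIER | IP | TID | TIME (a hypothetical but typical mix), the fixed leading part of a sample is four u64s, so evsel->sample_size is 32 bytes and any record smaller than sizeof(header) + 32 = 40 bytes is rejected:

	/* Hypothetical sample_type: four fixed u64 fields precede the variable part */
	u64 type = PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP |
		   PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
	unsigned int sample_size = 4 * sizeof(u64);	/* 32 bytes */

	/* perf_event__check_size() then demands:
	 *	event->header.size >= sizeof(event->header) + 32	(i.e. >= 40)
	 */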
@@ -1981,6 +2060,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 	data->stream_id = data->id = data->time = -1ULL;
 	data->period = evsel->attr.sample_period;
 	data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+	data->misc = event->header.misc;
+	data->id = -1ULL;
+	data->data_src = PERF_MEM_DATA_SRC_NONE;

 	if (event->header.type != PERF_RECORD_SAMPLE) {
 		if (!evsel->attr.sample_id_all)
@@ -1990,15 +2072,9 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,

 	array = event->sample.array;

-	/*
-	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
-	 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
-	 * check the format does not go past the end of the event.
-	 */
-	if (evsel->sample_size + sizeof(event->header) > event->header.size)
+	if (perf_event__check_size(event, evsel->sample_size))
 		return -EFAULT;

-	data->id = -1ULL;
 	if (type & PERF_SAMPLE_IDENTIFIER) {
 		data->id = *array;
 		array++;
@@ -2028,7 +2104,6 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 		array++;
 	}

-	data->addr = 0;
 	if (type & PERF_SAMPLE_ADDR) {
 		data->addr = *array;
 		array++;
@@ -2120,14 +2195,27 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 	if (type & PERF_SAMPLE_RAW) {
 		OVERFLOW_CHECK_u64(array);
 		u.val64 = *array;
-		if (WARN_ONCE(swapped,
-			      "Endianness of raw data not corrected!\n")) {
-			/* undo swap of u64, then swap on individual u32s */
+
+		/*
+		 * Undo swap of u64, then swap on individual u32s,
+		 * get the size of the raw area and undo all of the
+		 * swap. The pevent interface handles endianity by
+		 * itself.
+		 */
+		if (swapped) {
 			u.val64 = bswap_64(u.val64);
 			u.val32[0] = bswap_32(u.val32[0]);
 			u.val32[1] = bswap_32(u.val32[1]);
 		}
 		data->raw_size = u.val32[0];
+
+		/*
+		 * The raw data is aligned on 64bits including the
+		 * u32 size, so it's safe to use mem_bswap_64.
+		 */
+		if (swapped)
+			mem_bswap_64((void *) array, data->raw_size);
+
 		array = (void *)array + sizeof(u32);

 		OVERFLOW_CHECK(array, data->raw_size, max_size);
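
Note: the one-pass swap works because of how the raw area is laid out; schematically:

	/*
	 * PERF_SAMPLE_RAW wire layout (schematic):
	 *
	 *	u32  size;		<- low half of the first u64
	 *	char data[size];	<- payload, padded so that size + data
	 *				   end on a u64 boundary
	 *
	 * Since the whole area is a sequence of u64s, a cross-endian file can
	 * be fixed up with a single mem_bswap_64() pass, and the pevent layer
	 * then interprets the payload's fields itself.
	 */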
@@ -2192,14 +2280,12 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 		array++;
 	}

-	data->data_src = PERF_MEM_DATA_SRC_NONE;
 	if (type & PERF_SAMPLE_DATA_SRC) {
 		OVERFLOW_CHECK_u64(array);
 		data->data_src = *array;
 		array++;
 	}

-	data->transaction = 0;
 	if (type & PERF_SAMPLE_TRANSACTION) {
 		OVERFLOW_CHECK_u64(array);
 		data->transaction = *array;
@@ -2232,6 +2318,50 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
 	return 0;
 }

+int perf_evsel__parse_sample_timestamp(struct perf_evsel *evsel,
+				       union perf_event *event,
+				       u64 *timestamp)
+{
+	u64 type = evsel->attr.sample_type;
+	const u64 *array;
+
+	if (!(type & PERF_SAMPLE_TIME))
+		return -1;
+
+	if (event->header.type != PERF_RECORD_SAMPLE) {
+		struct perf_sample data = {
+			.time = -1ULL,
+		};
+
+		if (!evsel->attr.sample_id_all)
+			return -1;
+		if (perf_evsel__parse_id_sample(evsel, event, &data))
+			return -1;
+
+		*timestamp = data.time;
+		return 0;
+	}
+
+	array = event->sample.array;
+
+	if (perf_event__check_size(event, evsel->sample_size))
+		return -EFAULT;
+
+	if (type & PERF_SAMPLE_IDENTIFIER)
+		array++;
+
+	if (type & PERF_SAMPLE_IP)
+		array++;
+
+	if (type & PERF_SAMPLE_TID)
+		array++;
+
+	if (type & PERF_SAMPLE_TIME)
+		*timestamp = *array;
+
+	return 0;
+}
+
 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
 				     u64 read_format)
 {
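
Note: the new helper can skip straight to the timestamp because PERF_SAMPLE_IDENTIFIER, PERF_SAMPLE_IP and PERF_SAMPLE_TID each occupy exactly one u64 ahead of PERF_SAMPLE_TIME in the sample layout, so no full perf_evsel__parse_sample() is needed. A hypothetical caller that only needs ordering information:

	u64 timestamp = -1ULL;

	if (perf_evsel__parse_sample_timestamp(evsel, event, &timestamp))
		pr_debug("event carries no usable timestamp\n");
	else
		queue_event(timestamp, event);	/* queue_event() is hypothetical */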
@@ -2342,8 +2472,7 @@ size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,

 int perf_event__synthesize_sample(union perf_event *event, u64 type,
 				  u64 read_format,
-				  const struct perf_sample *sample,
-				  bool swapped)
+				  const struct perf_sample *sample)
 {
 	u64 *array;
 	size_t sz;
@@ -2368,15 +2497,6 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
 	if (type & PERF_SAMPLE_TID) {
 		u.val32[0] = sample->pid;
 		u.val32[1] = sample->tid;
-		if (swapped) {
-			/*
-			 * Inverse of what is done in perf_evsel__parse_sample
-			 */
-			u.val32[0] = bswap_32(u.val32[0]);
-			u.val32[1] = bswap_32(u.val32[1]);
-			u.val64 = bswap_64(u.val64);
-		}
-
 		*array = u.val64;
 		array++;
 	}
@@ -2403,13 +2523,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,

 	if (type & PERF_SAMPLE_CPU) {
 		u.val32[0] = sample->cpu;
-		if (swapped) {
-			/*
-			 * Inverse of what is done in perf_evsel__parse_sample
-			 */
-			u.val32[0] = bswap_32(u.val32[0]);
-			u.val64 = bswap_64(u.val64);
-		}
+		u.val32[1] = 0;
 		*array = u.val64;
 		array++;
 	}
@@ -2456,15 +2570,6 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,

 	if (type & PERF_SAMPLE_RAW) {
 		u.val32[0] = sample->raw_size;
-		if (WARN_ONCE(swapped,
-			      "Endianness of raw data not corrected!\n")) {
-			/*
-			 * Inverse of what is done in perf_evsel__parse_sample
-			 */
-			u.val32[0] = bswap_32(u.val32[0]);
-			u.val32[1] = bswap_32(u.val32[1]);
-			u.val64 = bswap_64(u.val64);
-		}
 		*array = u.val64;
 		array = (void *)array + sizeof(u32);

@@ -2743,8 +2848,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
 		break;
 	case EOPNOTSUPP:
 		if (evsel->attr.sample_period != 0)
-			return scnprintf(msg, size, "%s",
-	"PMU Hardware doesn't support sampling/overflow-interrupts.");
+			return scnprintf(msg, size,
+	"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
+					 perf_evsel__name(evsel));
 		if (evsel->attr.precise_ip)
 			return scnprintf(msg, size, "%s",
 	"\'precise\' request may not be supported. Try removing 'p' modifier.");
@@ -2781,16 +2887,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
 			 perf_evsel__name(evsel));
 }

-char *perf_evsel__env_arch(struct perf_evsel *evsel)
-{
-	if (evsel && evsel->evlist && evsel->evlist->env)
-		return evsel->evlist->env->arch;
-	return NULL;
-}
-
-char *perf_evsel__env_cpuid(struct perf_evsel *evsel)
+struct perf_env *perf_evsel__env(struct perf_evsel *evsel)
 {
-	if (evsel && evsel->evlist && evsel->evlist->env)
-		return evsel->evlist->env->cpuid;
+	if (evsel && evsel->evlist)
+		return evsel->evlist->env;
 	return NULL;
 }
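
Note: the two removed string accessors collapse into one helper returning the whole perf_env. Former callers would migrate along these lines (sketch of a hypothetical call site; arch and cpuid are fields of struct perf_env):

	struct perf_env *env = perf_evsel__env(evsel);
	const char *arch  = env ? env->arch  : NULL;
	const char *cpuid = env ? env->cpuid : NULL;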