author     Linus Torvalds <torvalds@linux-foundation.org>   2016-04-03 08:22:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-04-03 08:22:12 -0400
commit     4c3b73c6a2eea0cf27fb058086a0eb94e90bffd4 (patch)
tree       2add808dc63a3f6ec461a95af7ddc6e7a117a331
parent     7b367f5dba5c5162a7308e85d3fc9170b0cb3e5f (diff)
parent     85dc600263c2291cea33bffa90038808ee64198b (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
"Misc kernel side fixes:
- fix event leak
- fix AMD PMU driver bug
- fix core event handling bug
- fix build bug on certain randconfigs
Plus misc tooling fixes"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf/x86/amd/ibs: Fix pmu::stop() nesting
perf/core: Don't leak event in the syscall error path
perf/core: Fix time tracking bug with multiplexing
perf jit: genelf makes assumptions about endian
perf hists: Fix determination of a callchain node's childlessness
perf tools: Add missing initialization of perf_sample.cpumode in synthesized samples
perf tools: Fix build break on powerpc
perf/x86: Move events_sysfs_show() outside CPU_SUP_INTEL
perf bench: Fix detached tarball building due to missing 'perf bench memcpy' headers
perf tests: Fix tarpkg build test error output redirection
-rw-r--r--  arch/x86/events/amd/ibs.c              | 52
-rw-r--r--  arch/x86/events/perf_event.h           |  6
-rw-r--r--  kernel/events/core.c                   | 15
-rw-r--r--  tools/perf/MANIFEST                    |  1
-rw-r--r--  tools/perf/arch/powerpc/util/header.c  |  2
-rwxr-xr-x  tools/perf/tests/perf-targz-src-pkg    |  2
-rw-r--r--  tools/perf/ui/browsers/hists.c         |  2
-rw-r--r--  tools/perf/util/event.c                | 23
-rw-r--r--  tools/perf/util/genelf.h               | 24
-rw-r--r--  tools/perf/util/intel-bts.c            |  1
-rw-r--r--  tools/perf/util/intel-pt.c             |  3
-rw-r--r--  tools/perf/util/jitdump.c              |  2
12 files changed, 98 insertions(+), 35 deletions(-)
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 3ea25c3917c0..feb90f6730e8 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -28,10 +28,46 @@ static u32 ibs_caps;
 #define IBS_FETCH_CONFIG_MASK  (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
 #define IBS_OP_CONFIG_MASK     IBS_OP_MAX_CNT
 
+
+/*
+ * IBS states:
+ *
+ * ENABLED; tracks the pmu::add(), pmu::del() state, when set the counter is taken
+ * and any further add()s must fail.
+ *
+ * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
+ * complicated by the fact that the IBS hardware can send late NMIs (ie. after
+ * we've cleared the EN bit).
+ *
+ * In order to consume these late NMIs we have the STOPPED state, any NMI that
+ * happens after we've cleared the EN state will clear this bit and report the
+ * NMI handled (this is fundamentally racy in the face or multiple NMI sources,
+ * someone else can consume our BIT and our NMI will go unhandled).
+ *
+ * And since we cannot set/clear this separate bit together with the EN bit,
+ * there are races; if we cleared STARTED early, an NMI could land in
+ * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
+ * could happen if the period is small enough), and consume our STOPPED bit
+ * and trigger streams of unhandled NMIs.
+ *
+ * If, however, we clear STARTED late, an NMI can hit between clearing the
+ * EN bit and clearing STARTED, still see STARTED set and process the event.
+ * If this event will have the VALID bit clear, we bail properly, but this
+ * is not a given. With VALID set we can end up calling pmu::stop() again
+ * (the throttle logic) and trigger the WARNs in there.
+ *
+ * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
+ * nesting, and clear STARTED late, so that we have a well defined state over
+ * the clearing of the EN bit.
+ *
+ * XXX: we could probably be using !atomic bitops for all this.
+ */
+
 enum ibs_states {
         IBS_ENABLED     = 0,
         IBS_STARTED     = 1,
         IBS_STOPPING    = 2,
+        IBS_STOPPED     = 3,
 
         IBS_MAX_STATES,
 };
@@ -377,11 +413,10 @@ static void perf_ibs_start(struct perf_event *event, int flags)
 
         perf_ibs_set_period(perf_ibs, hwc, &period);
         /*
-         * Set STARTED before enabling the hardware, such that
-         * a subsequent NMI must observe it. Then clear STOPPING
-         * such that we don't consume NMIs by accident.
+         * Set STARTED before enabling the hardware, such that a subsequent NMI
+         * must observe it.
          */
         set_bit(IBS_STARTED, pcpu->state);
         clear_bit(IBS_STOPPING, pcpu->state);
         perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
 
@@ -396,6 +431,9 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
         u64 config;
         int stopping;
 
+        if (test_and_set_bit(IBS_STOPPING, pcpu->state))
+                return;
+
         stopping = test_bit(IBS_STARTED, pcpu->state);
 
         if (!stopping && (hwc->state & PERF_HES_UPTODATE))
@@ -405,12 +443,12 @@ static void perf_ibs_stop(struct perf_event *event, int flags)
 
         if (stopping) {
                 /*
-                 * Set STOPPING before disabling the hardware, such that it
+                 * Set STOPPED before disabling the hardware, such that it
                  * must be visible to NMIs the moment we clear the EN bit,
                  * at which point we can generate an !VALID sample which
                  * we need to consume.
                  */
-                set_bit(IBS_STOPPING, pcpu->state);
+                set_bit(IBS_STOPPED, pcpu->state);
                 perf_ibs_disable_event(perf_ibs, hwc, config);
                 /*
                  * Clear STARTED after disabling the hardware; if it were
@@ -556,7 +594,7 @@ fail:
          * with samples that even have the valid bit cleared.
          * Mark all this NMIs as handled.
          */
-        if (test_and_clear_bit(IBS_STOPPING, pcpu->state))
+        if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
                 return 1;
 
         return 0;
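
The IBS fix above is entirely about ordering: claim STOPPING first so a nested pmu::stop() bails out, make STOPPED visible before the hardware EN bit is cleared, and clear STARTED only afterwards so a late NMI is consumed exactly once. A standalone C11 sketch of that ordering, with hypothetical flags and helpers modelled on <stdatomic.h> rather than the kernel's per-CPU bitops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-CPU state, loosely mirroring enum ibs_states. */
static atomic_bool started, stopping, stopped;

static void ibs_stop_sketch(void)
{
        /* Claim the stop path first so a racing (throttle) stop bails out. */
        if (atomic_exchange(&stopping, true))
                return;                         /* already stopping: no nesting */

        atomic_store(&stopped, true);           /* visible before EN is cleared */
        /* ...the hardware EN bit would be cleared here... */
        atomic_store(&started, false);          /* cleared last, after EN */
}

static int ibs_nmi_sketch(bool valid)
{
        if (!atomic_load(&started) && !valid) {
                /* Late NMI after EN was cleared: consume it exactly once. */
                return atomic_exchange(&stopped, false) ? 1 : 0;
        }
        return 1;                               /* normal sample path */
}

int main(void)
{
        atomic_store(&started, true);
        ibs_stop_sketch();
        printf("late NMI handled: %d\n", ibs_nmi_sketch(false));
        return 0;
}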
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 716d0482f5db..ad4dc7ffffb5 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -800,6 +800,9 @@ ssize_t intel_event_sysfs_show(char *page, u64 config);
 
 struct attribute **merge_attr(struct attribute **a, struct attribute **b);
 
+ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
+                          char *page);
+
 #ifdef CONFIG_CPU_SUP_AMD
 
 int amd_pmu_init(void);
@@ -930,9 +933,6 @@ int p6_pmu_init(void);
 
 int knc_pmu_init(void);
 
-ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
-                          char *page);
-
 static inline int is_ht_workaround_enabled(void)
 {
         return !!(x86_pmu.flags & PMU_FL_EXCL_ENABLED);
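
The events_sysfs_show() move matters only on randconfigs: a declaration placed under one vendor's config guard is invisible when only the other vendor's support is selected, even though code outside that guard uses it. A self-contained analogue of the build failure and its fix, with all names made up for illustration:

#include <stdio.h>

/* Pretend randconfig: backend A enabled, backend B disabled. */
#define CONFIG_BACKEND_A 1

/* Declared unconditionally, as in the fix; hiding this behind a
 * CONFIG_BACKEND_B guard would break the CONFIG_BACKEND_A-only build. */
int shared_show(char *page);

#ifdef CONFIG_BACKEND_A
static int backend_a_use(char *page) { return shared_show(page); }
#endif

#ifdef CONFIG_BACKEND_B
static int backend_b_use(char *page) { return shared_show(page); }
#endif

int shared_show(char *page) { return sprintf(page, "event=0x3c\n"); }

int main(void)
{
        char page[64];
#ifdef CONFIG_BACKEND_A
        backend_a_use(page);
#endif
        printf("%s", page);
        return 0;
}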
diff --git a/kernel/events/core.c b/kernel/events/core.c
index de24fbce5277..52bedc5a5aaa 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2417,14 +2417,24 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                 cpuctx->task_ctx = NULL;
         }
 
-        is_active ^= ctx->is_active; /* changed bits */
-
+        /*
+         * Always update time if it was set; not only when it changes.
+         * Otherwise we can 'forget' to update time for any but the last
+         * context we sched out. For example:
+         *
+         *   ctx_sched_out(.event_type = EVENT_FLEXIBLE)
+         *   ctx_sched_out(.event_type = EVENT_PINNED)
+         *
+         * would only update time for the pinned events.
+         */
         if (is_active & EVENT_TIME) {
                 /* update (and stop) ctx time */
                 update_context_time(ctx);
                 update_cgrp_time_from_cpuctx(cpuctx);
         }
 
+        is_active ^= ctx->is_active; /* changed bits */
+
         if (!ctx->nr_active || !(is_active & EVENT_ALL))
                 return;
 
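
The comment added above describes the bug: the EVENT_TIME check used to run against the changed bits rather than the currently-set bits, so only the call that finally cleared EVENT_TIME updated the clock. A toy model of the two orderings (illustrative flags and counters only, not the kernel's scheduling code):

#include <stdio.h>

#define EVENT_FLEXIBLE  0x1
#define EVENT_PINNED    0x2
#define EVENT_TIME      0x4

static int time_updates;

static void sched_out(unsigned int *ctx_active, unsigned int event_type, int fixed)
{
        unsigned int is_active = *ctx_active;

        /* Clear the requested type; EVENT_TIME goes away with the last type. */
        *ctx_active &= ~event_type;
        if (!(*ctx_active & (EVENT_FLEXIBLE | EVENT_PINNED)))
                *ctx_active &= ~EVENT_TIME;

        if (!fixed)
                is_active ^= *ctx_active;       /* buggy order: changed bits only */

        if (is_active & EVENT_TIME)
                time_updates++;                 /* stands in for update_context_time() */

        if (fixed)
                is_active ^= *ctx_active;       /* fixed order: changed bits computed later */
}

int main(void)
{
        for (int fixed = 0; fixed <= 1; fixed++) {
                unsigned int active = EVENT_FLEXIBLE | EVENT_PINNED | EVENT_TIME;

                time_updates = 0;
                sched_out(&active, EVENT_FLEXIBLE, fixed);
                sched_out(&active, EVENT_PINNED, fixed);
                printf("%s: %d time update(s)\n", fixed ? "fixed" : "buggy", time_updates);
        }
        return 0;
}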
@@ -8532,6 +8542,7 @@ SYSCALL_DEFINE5(perf_event_open,
                                          f_flags);
         if (IS_ERR(event_file)) {
                 err = PTR_ERR(event_file);
+                event_file = NULL;
                 goto err_context;
         }
 
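
The one-line perf_event_open() change follows a common error-path idiom: once IS_ERR()/PTR_ERR() have turned the pointer into an error code, the variable is NULLed so shared cleanup labels cannot mistake the stale encoded value for a live object. A userspace sketch with local stand-ins for ERR_PTR()/IS_ERR(); the names and the cleanup are hypothetical and much simplified:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for the kernel's error-pointer encoding. */
static void *ERR_PTR(intptr_t err)       { return (void *)err; }
static intptr_t PTR_ERR(const void *ptr) { return (intptr_t)ptr; }
static int IS_ERR(const void *ptr)       { return (uintptr_t)ptr >= (uintptr_t)-4095; }

struct file_like { int fd; };

static struct file_like *get_file_like(int fail)
{
        if (fail)
                return ERR_PTR(-EINVAL);
        struct file_like *f = calloc(1, sizeof(*f));
        return f ? f : ERR_PTR(-ENOMEM);
}

static int open_sketch(int fail)
{
        int err = 0;
        struct file_like *event_file = get_file_like(fail);

        if (IS_ERR(event_file)) {
                err = PTR_ERR(event_file);
                event_file = NULL;      /* the one-line fix, in spirit */
                goto err_context;
        }
        free(event_file);
        return 0;

err_context:
        if (event_file)                 /* never sees a stale error value */
                free(event_file);
        return err;
}

int main(void) { printf("%d %d\n", open_sketch(0), open_sketch(1)); return 0; }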
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 2e1fa2357528..8c8c6b9ce915 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -74,6 +74,7 @@ arch/*/include/uapi/asm/unistd*.h
 arch/*/include/uapi/asm/perf_regs.h
 arch/*/lib/memcpy*.S
 arch/*/lib/memset*.S
+arch/*/include/asm/*features.h
 include/linux/poison.h
 include/linux/hw_breakpoint.h
 include/uapi/linux/perf_event.h
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index 6138bdef6e63..f8ccee132867 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -4,6 +4,8 @@
 #include <stdlib.h>
 #include <string.h>
 #include <linux/stringify.h>
+#include "header.h"
+#include "util.h"
 
 #define mfspr(rn) ({unsigned long rval; \
                     asm volatile("mfspr %0," __stringify(rn) \
diff --git a/tools/perf/tests/perf-targz-src-pkg b/tools/perf/tests/perf-targz-src-pkg
index 238aa3927c71..f2d9c5fe58e0 100755
--- a/tools/perf/tests/perf-targz-src-pkg
+++ b/tools/perf/tests/perf-targz-src-pkg
@@ -15,7 +15,7 @@ TMP_DEST=$(mktemp -d)
 tar xf ${TARBALL} -C $TMP_DEST
 rm -f ${TARBALL}
 cd - > /dev/null
-make -C $TMP_DEST/perf*/tools/perf > /dev/null 2>&1
+make -C $TMP_DEST/perf*/tools/perf > /dev/null
 RC=$?
 rm -rf ${TMP_DEST}
 exit $RC
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 4b9816555946..2a83414159a6 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -337,7 +337,7 @@ static void callchain_node__init_have_children(struct callchain_node *node,
         chain = list_entry(node->val.next, struct callchain_list, list);
         chain->has_children = has_sibling;
 
-        if (node->val.next != node->val.prev) {
+        if (!list_empty(&node->val)) {
                 chain = list_entry(node->val.prev, struct callchain_list, list);
                 chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
         }
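
The childlessness fix turns on the difference between two predicates over a circular doubly-linked list: "next != prev" on the head is only true with two or more entries, while "!list_empty()" is also true for a single entry. A minimal re-implementation (mimicking, not using, the kernel's list.h) that prints both predicates for 0, 1 and 2 entries:

#include <stdbool.h>
#include <stdio.h>

/* Minimal circular doubly-linked list, in the style of struct list_head. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *head) { head->next = head->prev = head; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

static bool list_empty(const struct list_head *head) { return head->next == head; }

int main(void)
{
        struct list_head head, a, b;

        INIT_LIST_HEAD(&head);
        printf("0 entries: next!=prev=%d !empty=%d\n",
               head.next != head.prev, !list_empty(&head));
        list_add_tail(&a, &head);
        printf("1 entry:   next!=prev=%d !empty=%d\n",
               head.next != head.prev, !list_empty(&head));
        list_add_tail(&b, &head);
        printf("2 entries: next!=prev=%d !empty=%d\n",
               head.next != head.prev, !list_empty(&head));
        return 0;
}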
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 52cf479bc593..dad55d04ffdd 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -56,13 +56,22 @@ const char *perf_event__name(unsigned int id)
         return perf_event__names[id];
 }
 
-static struct perf_sample synth_sample = {
+static int perf_tool__process_synth_event(struct perf_tool *tool,
+                                          union perf_event *event,
+                                          struct machine *machine,
+                                          perf_event__handler_t process)
+{
+        struct perf_sample synth_sample = {
         .pid       = -1,
         .tid       = -1,
         .time      = -1,
         .stream_id = -1,
         .cpu       = -1,
         .period    = 1,
+        .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
+        };
+
+        return process(tool, event, &synth_sample, machine);
 };
 
 /*
@@ -186,7 +195,7 @@ pid_t perf_event__synthesize_comm(struct perf_tool *tool,
         if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
                 return -1;
 
-        if (process(tool, event, &synth_sample, machine) != 0)
+        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                 return -1;
 
         return tgid;
@@ -218,7 +227,7 @@ static int perf_event__synthesize_fork(struct perf_tool *tool,
 
         event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
 
-        if (process(tool, event, &synth_sample, machine) != 0)
+        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                 return -1;
 
         return 0;
@@ -344,7 +353,7 @@ out:
                 event->mmap2.pid = tgid;
                 event->mmap2.tid = pid;
 
-                if (process(tool, event, &synth_sample, machine) != 0) {
+                if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
                         rc = -1;
                         break;
                 }
@@ -402,7 +411,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
 
                 memcpy(event->mmap.filename, pos->dso->long_name,
                        pos->dso->long_name_len + 1);
-                if (process(tool, event, &synth_sample, machine) != 0) {
+                if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
                         rc = -1;
                         break;
                 }
@@ -472,7 +481,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
                 /*
                  * Send the prepared comm event
                  */
-                if (process(tool, comm_event, &synth_sample, machine) != 0)
+                if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
                         break;
 
                 rc = 0;
@@ -701,7 +710,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
         event->mmap.len   = map->end - event->mmap.start;
         event->mmap.pid   = machine->pid;
 
-        err = process(tool, event, &synth_sample, machine);
+        err = perf_tool__process_synth_event(tool, event, machine, process);
         free(event);
 
         return err;
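
The new perf_tool__process_synth_event() wrapper exists so every synthesized sample carries a cpumode taken from the event header's misc field rather than the previous zero value. A small sketch of that derivation, with the constants copied from the perf_event UAPI header and the helper name invented here:

#include <stdint.h>
#include <stdio.h>

/* Values as in include/uapi/linux/perf_event.h. */
#define PERF_RECORD_MISC_CPUMODE_MASK   (7 << 0)
#define PERF_RECORD_MISC_KERNEL         (1 << 0)
#define PERF_RECORD_MISC_USER           (2 << 0)
#define PERF_RECORD_MISC_MMAP_DATA      (1 << 13)       /* unrelated flag sharing 'misc' */

/* The cpumode of a synthesized sample is just the low cpumode bits of the
 * event header's misc field, with the other flag bits masked off. */
static uint16_t synth_cpumode(uint16_t misc)
{
        return misc & PERF_RECORD_MISC_CPUMODE_MASK;
}

int main(void)
{
        uint16_t misc = PERF_RECORD_MISC_USER | PERF_RECORD_MISC_MMAP_DATA;

        printf("cpumode=%u (expect %u)\n", synth_cpumode(misc), PERF_RECORD_MISC_USER);
        return 0;
}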
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
index cd67e64a0494..2fbeb59c4bdd 100644
--- a/tools/perf/util/genelf.h
+++ b/tools/perf/util/genelf.h
@@ -9,36 +9,32 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
 
 #if defined(__arm__)
 #define GEN_ELF_ARCH    EM_ARM
-#define GEN_ELF_ENDIAN  ELFDATA2LSB
 #define GEN_ELF_CLASS   ELFCLASS32
 #elif defined(__aarch64__)
 #define GEN_ELF_ARCH    EM_AARCH64
-#define GEN_ELF_ENDIAN  ELFDATA2LSB
 #define GEN_ELF_CLASS   ELFCLASS64
 #elif defined(__x86_64__)
 #define GEN_ELF_ARCH    EM_X86_64
-#define GEN_ELF_ENDIAN  ELFDATA2LSB
 #define GEN_ELF_CLASS   ELFCLASS64
 #elif defined(__i386__)
 #define GEN_ELF_ARCH    EM_386
-#define GEN_ELF_ENDIAN  ELFDATA2LSB
 #define GEN_ELF_CLASS   ELFCLASS32
-#elif defined(__ppcle__)
-#define GEN_ELF_ARCH    EM_PPC
-#define GEN_ELF_ENDIAN  ELFDATA2LSB
-#define GEN_ELF_CLASS   ELFCLASS64
-#elif defined(__powerpc__)
-#define GEN_ELF_ARCH    EM_PPC64
-#define GEN_ELF_ENDIAN  ELFDATA2MSB
-#define GEN_ELF_CLASS   ELFCLASS64
-#elif defined(__powerpcle__)
+#elif defined(__powerpc64__)
 #define GEN_ELF_ARCH    EM_PPC64
-#define GEN_ELF_ENDIAN  ELFDATA2LSB
 #define GEN_ELF_CLASS   ELFCLASS64
+#elif defined(__powerpc__)
+#define GEN_ELF_ARCH    EM_PPC
+#define GEN_ELF_CLASS   ELFCLASS32
 #else
 #error "unsupported architecture"
 #endif
 
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define GEN_ELF_ENDIAN  ELFDATA2MSB
+#else
+#define GEN_ELF_ENDIAN  ELFDATA2LSB
+#endif
+
 #if GEN_ELF_CLASS == ELFCLASS64
 #define elf_newehdr     elf64_newehdr
 #define elf_getshdr     elf64_getshdr
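
The genelf.h change replaces per-architecture GEN_ELF_ENDIAN definitions with one derived from the compiler's byte order. The same selection can be exercised stand-alone (assuming a glibc-style <endian.h> that defines __BYTE_ORDER):

#include <elf.h>
#include <endian.h>
#include <stdio.h>

/* Derive the ELF data encoding from the build's byte order instead of
 * hard-coding it per architecture, as the patch above does. */
#if __BYTE_ORDER == __BIG_ENDIAN
#define GEN_ELF_ENDIAN  ELFDATA2MSB
#else
#define GEN_ELF_ENDIAN  ELFDATA2LSB
#endif

int main(void)
{
        printf("GEN_ELF_ENDIAN = %d (%s)\n", GEN_ELF_ENDIAN,
               GEN_ELF_ENDIAN == ELFDATA2MSB ? "big endian" : "little endian");
        return 0;
}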
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index 6bc3ecd2e7ca..abf1366e2a24 100644
--- a/tools/perf/util/intel-bts.c
+++ b/tools/perf/util/intel-bts.c
@@ -279,6 +279,7 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
         event.sample.header.misc = PERF_RECORD_MISC_USER;
         event.sample.header.size = sizeof(struct perf_event_header);
 
+        sample.cpumode = PERF_RECORD_MISC_USER;
         sample.ip = le64_to_cpu(branch->from);
         sample.pid = btsq->pid;
         sample.tid = btsq->tid;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 05d815851be1..407f11b97c8d 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -979,6 +979,7 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
         if (!pt->timeless_decoding)
                 sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
 
+        sample.cpumode = PERF_RECORD_MISC_USER;
         sample.ip = ptq->state->from_ip;
         sample.pid = ptq->pid;
         sample.tid = ptq->tid;
@@ -1035,6 +1036,7 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
         if (!pt->timeless_decoding)
                 sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
 
+        sample.cpumode = PERF_RECORD_MISC_USER;
         sample.ip = ptq->state->from_ip;
         sample.pid = ptq->pid;
         sample.tid = ptq->tid;
@@ -1092,6 +1094,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
         if (!pt->timeless_decoding)
                 sample.time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
 
+        sample.cpumode = PERF_RECORD_MISC_USER;
         sample.ip = ptq->state->from_ip;
         sample.pid = ptq->pid;
         sample.tid = ptq->tid;
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index cd272cc21e05..ad0c0bb1fbc7 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -417,6 +417,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
          * use first address as sample address
          */
         memset(&sample, 0, sizeof(sample));
+        sample.cpumode = PERF_RECORD_MISC_USER;
         sample.pid  = pid;
         sample.tid  = tid;
         sample.time = id->time;
@@ -505,6 +506,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
          * use first address as sample address
          */
         memset(&sample, 0, sizeof(sample));
+        sample.cpumode = PERF_RECORD_MISC_USER;
         sample.pid  = pid;
         sample.tid  = tid;
         sample.time = id->time;