path: root/tools/perf/util/thread-stack.h
author	Adrian Hunter <adrian.hunter@intel.com>	2018-12-21 07:06:19 -0500
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2019-01-02 09:03:17 -0500
commit	256d92bc93fd40411a02be5cdba74a7bf91e6e09 (patch)
tree	92c8ed48b49b018b802461536265b8772339d6c7 /tools/perf/util/thread-stack.h
parent	139f42f3b3b495e61bb2cfef40e1dd5e845e3052 (diff)
perf thread-stack: Fix thread stack processing for the idle task
perf creates a single 'struct thread' to represent the idle task. That is
because threads are identified by PID and TID, and the idle task always has
PID == TID == 0. However, there are actually separate idle tasks for each
CPU. That creates a problem for thread stack processing, which assumes that
each thread has a single stack, not one stack per CPU.

Fix that by passing through the CPU number and, in the case of the idle
"thread", picking the thread stack from an array based on the CPU number.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: http://lkml.kernel.org/r/20181221120620.9659-8-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
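The commit message describes picking the idle task's thread stack from an array indexed by CPU number. A minimal standalone sketch of that idea follows; the struct fields, the MAX_NR_CPUS bound, the idle_thread_stacks array and pick_thread_stack() are illustrative assumptions, not the code added by this patch:

	/* Minimal illustration, not the patch's code: names below are assumed. */
	#define MAX_NR_CPUS 256			/* illustrative bound */

	struct thread_stack;			/* opaque thread stack type */

	struct thread {
		int pid_;			/* 0 for every idle task (PID == TID == 0) */
		struct thread_stack *ts;	/* single stack for ordinary threads */
	};

	/* Hypothetical per-CPU storage for the idle "thread"'s stacks. */
	static struct thread_stack *idle_thread_stacks[MAX_NR_CPUS];

	static struct thread_stack *pick_thread_stack(struct thread *thread, int cpu)
	{
		/* The idle task is shared across CPUs, so select a stack by CPU number. */
		if (thread->pid_ == 0 && cpu >= 0 && cpu < MAX_NR_CPUS)
			return idle_thread_stacks[cpu];

		/* Any other thread keeps its single stack. */
		return thread->ts;
	}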
Diffstat (limited to 'tools/perf/util/thread-stack.h')
-rw-r--r--	tools/perf/util/thread-stack.h	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
index f97c00a8c251..1f626f4a1c40 100644
--- a/tools/perf/util/thread-stack.h
+++ b/tools/perf/util/thread-stack.h
@@ -80,14 +80,14 @@ struct call_return_processor {
 	void *data;
 };
 
-int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
+int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
 			u64 to_ip, u16 insn_len, u64 trace_nr);
-void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr);
-void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
+void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr);
+void thread_stack__sample(struct thread *thread, int cpu, struct ip_callchain *chain,
 			  size_t sz, u64 ip, u64 kernel_start);
 int thread_stack__flush(struct thread *thread);
 void thread_stack__free(struct thread *thread);
-size_t thread_stack__depth(struct thread *thread);
+size_t thread_stack__depth(struct thread *thread, int cpu);
 
 struct call_return_processor *
 call_return_processor__new(int (*process)(struct call_return *cr, void *data),
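With the extra cpu parameter in the prototypes above, callers are expected to forward the CPU of the event being processed so that the idle task resolves to the right per-CPU stack. A hedged sketch of such a call site; update_thread_stack(), the trace_nr argument and the perf_sample fields used here are assumptions for illustration, not code from this patch:

	/*
	 * Hypothetical call site: pass the sample's CPU through to the
	 * thread stack machinery.
	 */
	static int update_thread_stack(struct thread *thread,
				       struct perf_sample *sample, u64 trace_nr)
	{
		return thread_stack__event(thread, sample->cpu, sample->flags,
					   sample->ip, sample->addr,
					   sample->insn_len, trace_nr);
	}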