 tools/perf/util/thread-stack.c | 81 ++++++++++++++++++++++++++-----------------
 1 file changed, 49 insertions(+), 32 deletions(-)
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index 068c7c8db4be..d93cd286b048 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -111,9 +111,16 @@ static struct thread_stack *thread_stack__new(struct thread *thread,
 	ts->kernel_start = 1ULL << 63;
 	ts->crp = crp;
 
+	thread->ts = ts;
+
 	return ts;
 }
 
+static inline struct thread_stack *thread__stack(struct thread *thread)
+{
+	return thread ? thread->ts : NULL;
+}
+
 static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
 			      bool trace_end)
 {
@@ -226,8 +233,10 @@ static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
 
 int thread_stack__flush(struct thread *thread)
 {
-	if (thread->ts)
-		return __thread_stack__flush(thread, thread->ts);
+	struct thread_stack *ts = thread->ts;
+
+	if (ts)
+		return __thread_stack__flush(thread, ts);
 
 	return 0;
 }
@@ -235,16 +244,18 @@ int thread_stack__flush(struct thread *thread)
 int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 			u64 to_ip, u16 insn_len, u64 trace_nr)
 {
+	struct thread_stack *ts = thread__stack(thread);
+
 	if (!thread)
 		return -EINVAL;
 
-	if (!thread->ts) {
-		thread->ts = thread_stack__new(thread, NULL);
-		if (!thread->ts) {
+	if (!ts) {
+		ts = thread_stack__new(thread, NULL);
+		if (!ts) {
 			pr_warning("Out of memory: no thread stack\n");
 			return -ENOMEM;
 		}
-		thread->ts->trace_nr = trace_nr;
+		ts->trace_nr = trace_nr;
 	}
 
 	/*
@@ -252,14 +263,14 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 	 * the stack might be completely invalid. Better to report nothing than
 	 * to report something misleading, so flush the stack.
 	 */
-	if (trace_nr != thread->ts->trace_nr) {
-		if (thread->ts->trace_nr)
-			__thread_stack__flush(thread, thread->ts);
-		thread->ts->trace_nr = trace_nr;
+	if (trace_nr != ts->trace_nr) {
+		if (ts->trace_nr)
+			__thread_stack__flush(thread, ts);
+		ts->trace_nr = trace_nr;
 	}
 
 	/* Stop here if thread_stack__process() is in use */
-	if (thread->ts->crp)
+	if (ts->crp)
 		return 0;
 
 	if (flags & PERF_IP_FLAG_CALL) {
@@ -270,7 +281,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 		ret_addr = from_ip + insn_len;
 		if (ret_addr == to_ip)
 			return 0; /* Zero-length calls are excluded */
-		return thread_stack__push(thread->ts, ret_addr,
+		return thread_stack__push(ts, ret_addr,
 					  flags & PERF_IP_FLAG_TRACE_END);
 	} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
 		/*
@@ -280,10 +291,10 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 		 * address, so try to pop that. Also, do not expect a call made
 		 * when the trace ended, to return, so pop that.
 		 */
-		thread_stack__pop(thread->ts, to_ip);
-		thread_stack__pop_trace_end(thread->ts);
+		thread_stack__pop(ts, to_ip);
+		thread_stack__pop_trace_end(ts);
 	} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
-		thread_stack__pop(thread->ts, to_ip);
+		thread_stack__pop(ts, to_ip);
 	}
 
 	return 0;
@@ -291,21 +302,25 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 
 void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
 {
-	if (!thread || !thread->ts)
+	struct thread_stack *ts = thread__stack(thread);
+
+	if (!ts)
 		return;
 
-	if (trace_nr != thread->ts->trace_nr) {
-		if (thread->ts->trace_nr)
-			__thread_stack__flush(thread, thread->ts);
-		thread->ts->trace_nr = trace_nr;
+	if (trace_nr != ts->trace_nr) {
+		if (ts->trace_nr)
+			__thread_stack__flush(thread, ts);
+		ts->trace_nr = trace_nr;
 	}
 }
 
 void thread_stack__free(struct thread *thread)
 {
-	if (thread->ts) {
-		__thread_stack__flush(thread, thread->ts);
-		zfree(&thread->ts->stack);
+	struct thread_stack *ts = thread->ts;
+
+	if (ts) {
+		__thread_stack__flush(thread, ts);
+		zfree(&ts->stack);
 		zfree(&thread->ts);
 	}
 }
@@ -318,6 +333,7 @@ static inline u64 callchain_context(u64 ip, u64 kernel_start)
 void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
 			  size_t sz, u64 ip, u64 kernel_start)
 {
+	struct thread_stack *ts = thread__stack(thread);
 	u64 context = callchain_context(ip, kernel_start);
 	u64 last_context;
 	size_t i, j;
@@ -330,15 +346,15 @@ void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
 	chain->ips[0] = context;
 	chain->ips[1] = ip;
 
-	if (!thread || !thread->ts) {
+	if (!ts) {
 		chain->nr = 2;
 		return;
 	}
 
 	last_context = context;
 
-	for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) {
-		ip = thread->ts->stack[thread->ts->cnt - j].ret_addr;
+	for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
+		ip = ts->stack[ts->cnt - j].ret_addr;
 		context = callchain_context(ip, kernel_start);
 		if (context != last_context) {
 			if (i >= sz - 1)
@@ -590,7 +606,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 			  struct addr_location *to_al, u64 ref,
 			  struct call_return_processor *crp)
 {
-	struct thread_stack *ts = thread->ts;
+	struct thread_stack *ts = thread__stack(thread);
 	int err = 0;
 
 	if (ts && !ts->crp) {
@@ -600,10 +616,9 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 	}
 
 	if (!ts) {
-		thread->ts = thread_stack__new(thread, crp);
-		if (!thread->ts)
+		ts = thread_stack__new(thread, crp);
+		if (!ts)
 			return -ENOMEM;
-		ts = thread->ts;
 		ts->comm = comm;
 	}
 
@@ -668,7 +683,9 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 
 size_t thread_stack__depth(struct thread *thread)
 {
-	if (!thread->ts)
+	struct thread_stack *ts = thread__stack(thread);
+
+	if (!ts)
 		return 0;
-	return thread->ts->cnt;
+	return ts->cnt;
 }
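
The whole patch is one mechanical idea: every function that used to dereference thread->ts directly now fetches it once through the NULL-safe thread__stack() accessor, and thread_stack__new() publishes the new stack to thread->ts itself so callers no longer assign it. Below is a minimal standalone sketch of that pattern, compilable on its own; the struct fields and the simplified constructor are hypothetical stand-ins for the real perf code, and only thread__stack() matches the patch verbatim.

#include <stdio.h>
#include <stdlib.h>

struct thread_stack {
	size_t cnt;			/* number of entries on the call stack */
};

struct thread {
	struct thread_stack *ts;	/* lazily allocated */
};

/* NULL-safe accessor: tolerates a NULL thread as well as a NULL stack */
static inline struct thread_stack *thread__stack(struct thread *thread)
{
	return thread ? thread->ts : NULL;
}

/* The constructor publishes itself, as in the patch's first hunk */
static struct thread_stack *thread_stack__new(struct thread *thread)
{
	struct thread_stack *ts = calloc(1, sizeof(*ts));

	if (ts)
		thread->ts = ts;	/* callers no longer assign thread->ts */
	return ts;
}

/* Call sites fetch the stack once into a local and test that */
static size_t thread_stack__depth(struct thread *thread)
{
	struct thread_stack *ts = thread__stack(thread);

	if (!ts)
		return 0;
	return ts->cnt;
}

int main(void)
{
	struct thread t = { .ts = NULL };

	printf("depth with no stack: %zu\n", thread_stack__depth(&t));	/* 0 */

	if (!thread_stack__new(&t))
		return 1;
	t.ts->cnt = 3;
	printf("depth: %zu\n", thread_stack__depth(&t));		/* 3 */

	free(t.ts);
	return 0;
}

Funneling every read through one accessor keeps the knowledge of where the stack is stored in a single place, so a later change to how thread stacks are kept (for example, more than one stack per thread) would only have to touch thread__stack() rather than every call site.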
