 tools/perf/util/thread-stack.c | 585 +++++++++++++++++++++++++++++++++++++++-
 tools/perf/util/thread-stack.h |  79 ++++++++
 2 files changed, 659 insertions(+), 5 deletions(-)
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index 85b60d2e738f..9ed59a452d1f 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -13,23 +13,96 @@
  *
  */
 
+#include <linux/rbtree.h>
+#include <linux/list.h>
 #include "thread.h"
 #include "event.h"
+#include "machine.h"
 #include "util.h"
 #include "debug.h"
+#include "symbol.h"
+#include "comm.h"
 #include "thread-stack.h"
 
-#define STACK_GROWTH 4096
+#define CALL_PATH_BLOCK_SHIFT 8
+#define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT)
+#define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1)
 
+struct call_path_block {
+	struct call_path cp[CALL_PATH_BLOCK_SIZE];
+	struct list_head node;
+};
+
+/**
+ * struct call_path_root - root of all call paths.
+ * @call_path: root call path
+ * @blocks: list of blocks to store call paths
+ * @next: next free space
+ * @sz: number of spaces
+ */
+struct call_path_root {
+	struct call_path call_path;
+	struct list_head blocks;
+	size_t next;
+	size_t sz;
+};
+
+/**
+ * struct call_return_processor - provides a call-back to consume call-return
+ *                                information.
+ * @cpr: call path root
+ * @process: call-back that accepts call/return information
+ * @data: anonymous data for call-back
+ */
+struct call_return_processor {
+	struct call_path_root *cpr;
+	int (*process)(struct call_return *cr, void *data);
+	void *data;
+};
+
+#define STACK_GROWTH 2048
+
+/**
+ * struct thread_stack_entry - thread stack entry.
+ * @ret_addr: return address
+ * @timestamp: timestamp (if known)
+ * @ref: external reference (e.g. db_id of sample)
+ * @branch_count: the branch count when the entry was created
+ * @cp: call path
+ * @no_call: a 'call' was not seen
+ */
 struct thread_stack_entry {
 	u64 ret_addr;
+	u64 timestamp;
+	u64 ref;
+	u64 branch_count;
+	struct call_path *cp;
+	bool no_call;
 };
 
+/**
+ * struct thread_stack - thread stack constructed from 'call' and 'return'
+ *                       branch samples.
+ * @stack: array that holds the stack
+ * @cnt: number of entries in the stack
+ * @sz: current maximum stack size
+ * @trace_nr: current trace number
+ * @branch_count: running branch count
+ * @kernel_start: kernel start address
+ * @last_time: last timestamp
+ * @crp: call/return processor
+ * @comm: current comm
+ */
 struct thread_stack {
 	struct thread_stack_entry *stack;
 	size_t cnt;
 	size_t sz;
 	u64 trace_nr;
+	u64 branch_count;
+	u64 kernel_start;
+	u64 last_time;
+	struct call_return_processor *crp;
+	struct comm *comm;
 };
 
 static int thread_stack__grow(struct thread_stack *ts)
@@ -50,7 +123,8 @@ static int thread_stack__grow(struct thread_stack *ts)
 	return 0;
 }
 
-static struct thread_stack *thread_stack__new(void)
+static struct thread_stack *thread_stack__new(struct thread *thread,
+					      struct call_return_processor *crp)
 {
 	struct thread_stack *ts;
 
@@ -63,6 +137,12 @@ static struct thread_stack *thread_stack__new(void)
 		return NULL;
 	}
 
+	if (thread->mg && thread->mg->machine)
+		ts->kernel_start = machine__kernel_start(thread->mg->machine);
+	else
+		ts->kernel_start = 1ULL << 63;
+	ts->crp = crp;
+
 	return ts;
 }
 
@@ -104,6 +184,64 @@ static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
 	}
 }
 
+static bool thread_stack__in_kernel(struct thread_stack *ts)
+{
+	if (!ts->cnt)
+		return false;
+
+	return ts->stack[ts->cnt - 1].cp->in_kernel;
+}
+
+static int thread_stack__call_return(struct thread *thread,
+				     struct thread_stack *ts, size_t idx,
+				     u64 timestamp, u64 ref, bool no_return)
+{
+	struct call_return_processor *crp = ts->crp;
+	struct thread_stack_entry *tse;
+	struct call_return cr = {
+		.thread = thread,
+		.comm = ts->comm,
+		.db_id = 0,
+	};
+
+	tse = &ts->stack[idx];
+	cr.cp = tse->cp;
+	cr.call_time = tse->timestamp;
+	cr.return_time = timestamp;
+	cr.branch_count = ts->branch_count - tse->branch_count;
+	cr.call_ref = tse->ref;
+	cr.return_ref = ref;
+	if (tse->no_call)
+		cr.flags |= CALL_RETURN_NO_CALL;
+	if (no_return)
+		cr.flags |= CALL_RETURN_NO_RETURN;
+
+	return crp->process(&cr, crp->data);
+}
+
+static int thread_stack__flush(struct thread *thread, struct thread_stack *ts)
+{
+	struct call_return_processor *crp = ts->crp;
+	int err;
+
+	if (!crp) {
+		ts->cnt = 0;
+		return 0;
+	}
+
+	while (ts->cnt) {
+		err = thread_stack__call_return(thread, ts, --ts->cnt,
+						ts->last_time, 0, true);
+		if (err) {
+			pr_err("Error flushing thread stack!\n");
+			ts->cnt = 0;
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 			u64 to_ip, u16 insn_len, u64 trace_nr)
 {
@@ -111,7 +249,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 		return -EINVAL;
 
 	if (!thread->ts) {
-		thread->ts = thread_stack__new();
+		thread->ts = thread_stack__new(thread, NULL);
 		if (!thread->ts) {
 			pr_warning("Out of memory: no thread stack\n");
 			return -ENOMEM;
@@ -122,13 +260,18 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 	/*
 	 * When the trace is discontinuous, the trace_nr changes. In that case
 	 * the stack might be completely invalid. Better to report nothing than
-	 * to report something misleading, so reset the stack count to zero.
+	 * to report something misleading, so flush the stack.
 	 */
 	if (trace_nr != thread->ts->trace_nr) {
+		if (thread->ts->trace_nr)
+			thread_stack__flush(thread, thread->ts);
 		thread->ts->trace_nr = trace_nr;
-		thread->ts->cnt = 0;
 	}
 
+	/* Stop here if thread_stack__process() is in use */
+	if (thread->ts->crp)
+		return 0;
+
 	if (flags & PERF_IP_FLAG_CALL) {
 		u64 ret_addr;
 
@@ -147,9 +290,22 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 	return 0;
 }
 
+void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
+{
+	if (!thread || !thread->ts)
+		return;
+
+	if (trace_nr != thread->ts->trace_nr) {
+		if (thread->ts->trace_nr)
+			thread_stack__flush(thread, thread->ts);
+		thread->ts->trace_nr = trace_nr;
+	}
+}
+
 void thread_stack__free(struct thread *thread)
 {
 	if (thread->ts) {
+		thread_stack__flush(thread, thread->ts);
 		zfree(&thread->ts->stack);
 		zfree(&thread->ts);
 	}
@@ -170,3 +326,422 @@ void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
 	for (i = 1; i < chain->nr; i++)
 		chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
 }
+
+static void call_path__init(struct call_path *cp, struct call_path *parent,
+			    struct symbol *sym, u64 ip, bool in_kernel)
+{
+	cp->parent = parent;
+	cp->sym = sym;
+	cp->ip = sym ? 0 : ip;
+	cp->db_id = 0;
+	cp->in_kernel = in_kernel;
+	RB_CLEAR_NODE(&cp->rb_node);
+	cp->children = RB_ROOT;
+}
+
+static struct call_path_root *call_path_root__new(void)
+{
+	struct call_path_root *cpr;
+
+	cpr = zalloc(sizeof(struct call_path_root));
+	if (!cpr)
+		return NULL;
+	call_path__init(&cpr->call_path, NULL, NULL, 0, false);
+	INIT_LIST_HEAD(&cpr->blocks);
+	return cpr;
+}
+
+static void call_path_root__free(struct call_path_root *cpr)
+{
+	struct call_path_block *pos, *n;
+
+	list_for_each_entry_safe(pos, n, &cpr->blocks, node) {
+		list_del(&pos->node);
+		free(pos);
+	}
+	free(cpr);
+}
+
+static struct call_path *call_path__new(struct call_path_root *cpr,
+					struct call_path *parent,
+					struct symbol *sym, u64 ip,
+					bool in_kernel)
+{
+	struct call_path_block *cpb;
+	struct call_path *cp;
+	size_t n;
+
+	if (cpr->next < cpr->sz) {
+		cpb = list_last_entry(&cpr->blocks, struct call_path_block,
+				      node);
+	} else {
+		cpb = zalloc(sizeof(struct call_path_block));
+		if (!cpb)
+			return NULL;
+		list_add_tail(&cpb->node, &cpr->blocks);
+		cpr->sz += CALL_PATH_BLOCK_SIZE;
+	}
+
+	n = cpr->next++ & CALL_PATH_BLOCK_MASK;
+	cp = &cpb->cp[n];
+
+	call_path__init(cp, parent, sym, ip, in_kernel);
+
+	return cp;
+}
+
+static struct call_path *call_path__findnew(struct call_path_root *cpr,
+					    struct call_path *parent,
+					    struct symbol *sym, u64 ip, u64 ks)
+{
+	struct rb_node **p;
+	struct rb_node *node_parent = NULL;
+	struct call_path *cp;
+	bool in_kernel = ip >= ks;
+
+	if (sym)
+		ip = 0;
+
+	if (!parent)
+		return call_path__new(cpr, parent, sym, ip, in_kernel);
+
+	p = &parent->children.rb_node;
+	while (*p != NULL) {
+		node_parent = *p;
+		cp = rb_entry(node_parent, struct call_path, rb_node);
+
+		if (cp->sym == sym && cp->ip == ip)
+			return cp;
+
+		if (sym < cp->sym || (sym == cp->sym && ip < cp->ip))
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+
+	cp = call_path__new(cpr, parent, sym, ip, in_kernel);
+	if (!cp)
+		return NULL;
+
+	rb_link_node(&cp->rb_node, node_parent, p);
+	rb_insert_color(&cp->rb_node, &parent->children);
+
+	return cp;
+}
+
+struct call_return_processor *
+call_return_processor__new(int (*process)(struct call_return *cr, void *data),
+			   void *data)
+{
+	struct call_return_processor *crp;
+
+	crp = zalloc(sizeof(struct call_return_processor));
+	if (!crp)
+		return NULL;
+	crp->cpr = call_path_root__new();
+	if (!crp->cpr)
+		goto out_free;
+	crp->process = process;
+	crp->data = data;
+	return crp;
+
+out_free:
+	free(crp);
+	return NULL;
+}
+
+void call_return_processor__free(struct call_return_processor *crp)
+{
+	if (crp) {
+		call_path_root__free(crp->cpr);
+		free(crp);
+	}
+}
+
+static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
+				 u64 timestamp, u64 ref, struct call_path *cp,
+				 bool no_call)
+{
+	struct thread_stack_entry *tse;
+	int err;
+
+	if (ts->cnt == ts->sz) {
+		err = thread_stack__grow(ts);
+		if (err)
+			return err;
+	}
+
+	tse = &ts->stack[ts->cnt++];
+	tse->ret_addr = ret_addr;
+	tse->timestamp = timestamp;
+	tse->ref = ref;
+	tse->branch_count = ts->branch_count;
+	tse->cp = cp;
+	tse->no_call = no_call;
+
+	return 0;
+}
+
+static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
+				u64 ret_addr, u64 timestamp, u64 ref,
+				struct symbol *sym)
+{
+	int err;
+
+	if (!ts->cnt)
+		return 1;
+
+	if (ts->cnt == 1) {
+		struct thread_stack_entry *tse = &ts->stack[0];
+
+		if (tse->cp->sym == sym)
+			return thread_stack__call_return(thread, ts, --ts->cnt,
+							 timestamp, ref, false);
+	}
+
+	if (ts->stack[ts->cnt - 1].ret_addr == ret_addr) {
+		return thread_stack__call_return(thread, ts, --ts->cnt,
+						 timestamp, ref, false);
+	} else {
+		size_t i = ts->cnt - 1;
+
+		while (i--) {
+			if (ts->stack[i].ret_addr != ret_addr)
+				continue;
+			i += 1;
+			while (ts->cnt > i) {
+				err = thread_stack__call_return(thread, ts,
+								--ts->cnt,
+								timestamp, ref,
+								true);
+				if (err)
+					return err;
+			}
+			return thread_stack__call_return(thread, ts, --ts->cnt,
+							 timestamp, ref, false);
+		}
+	}
+
+	return 1;
+}
+
+static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts,
+				struct perf_sample *sample,
+				struct addr_location *from_al,
+				struct addr_location *to_al, u64 ref)
+{
+	struct call_path_root *cpr = ts->crp->cpr;
+	struct call_path *cp;
+	struct symbol *sym;
+	u64 ip;
+
+	if (sample->ip) {
+		ip = sample->ip;
+		sym = from_al->sym;
+	} else if (sample->addr) {
+		ip = sample->addr;
+		sym = to_al->sym;
+	} else {
+		return 0;
+	}
+
+	cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
+				ts->kernel_start);
+	if (!cp)
+		return -ENOMEM;
+
+	return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp,
+				     true);
+}
+
+static int thread_stack__no_call_return(struct thread *thread,
+					struct thread_stack *ts,
+					struct perf_sample *sample,
+					struct addr_location *from_al,
+					struct addr_location *to_al, u64 ref)
+{
+	struct call_path_root *cpr = ts->crp->cpr;
+	struct call_path *cp, *parent;
+	u64 ks = ts->kernel_start;
+	int err;
+
+	if (sample->ip >= ks && sample->addr < ks) {
+		/* Return to userspace, so pop all kernel addresses */
+		while (thread_stack__in_kernel(ts)) {
+			err = thread_stack__call_return(thread, ts, --ts->cnt,
+							sample->time, ref,
+							true);
+			if (err)
+				return err;
+		}
+
+		/* If the stack is empty, push the userspace address */
+		if (!ts->cnt) {
+			cp = call_path__findnew(cpr, &cpr->call_path,
+						to_al->sym, sample->addr,
+						ts->kernel_start);
+			if (!cp)
+				return -ENOMEM;
+			return thread_stack__push_cp(ts, 0, sample->time, ref,
+						     cp, true);
+		}
+	} else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
+		/* Return to userspace, so pop all kernel addresses */
+		while (thread_stack__in_kernel(ts)) {
+			err = thread_stack__call_return(thread, ts, --ts->cnt,
+							sample->time, ref,
+							true);
+			if (err)
+				return err;
+		}
+	}
+
+	if (ts->cnt)
+		parent = ts->stack[ts->cnt - 1].cp;
+	else
+		parent = &cpr->call_path;
+
+	/* This 'return' had no 'call', so push and pop top of stack */
+	cp = call_path__findnew(cpr, parent, from_al->sym, sample->ip,
+				ts->kernel_start);
+	if (!cp)
+		return -ENOMEM;
+
+	err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
+				    true);
+	if (err)
+		return err;
+
+	return thread_stack__pop_cp(thread, ts, sample->addr, sample->time, ref,
+				    to_al->sym);
+}
+
+static int thread_stack__trace_begin(struct thread *thread,
+				     struct thread_stack *ts, u64 timestamp,
+				     u64 ref)
+{
+	struct thread_stack_entry *tse;
+	int err;
+
+	if (!ts->cnt)
+		return 0;
+
+	/* Pop trace end */
+	tse = &ts->stack[ts->cnt - 1];
+	if (tse->cp->sym == NULL && tse->cp->ip == 0) {
+		err = thread_stack__call_return(thread, ts, --ts->cnt,
+						timestamp, ref, false);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int thread_stack__trace_end(struct thread_stack *ts,
+				   struct perf_sample *sample, u64 ref)
+{
+	struct call_path_root *cpr = ts->crp->cpr;
+	struct call_path *cp;
+	u64 ret_addr;
+
+	/* No point having 'trace end' on the bottom of the stack */
+	if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
+		return 0;
+
+	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
+				ts->kernel_start);
+	if (!cp)
+		return -ENOMEM;
+
+	ret_addr = sample->ip + sample->insn_len;
+
+	return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
+				     false);
+}
+
+int thread_stack__process(struct thread *thread, struct comm *comm,
+			  struct perf_sample *sample,
+			  struct addr_location *from_al,
+			  struct addr_location *to_al, u64 ref,
+			  struct call_return_processor *crp)
+{
+	struct thread_stack *ts = thread->ts;
+	int err = 0;
+
+	if (ts) {
+		if (!ts->crp) {
+			/* Supersede thread_stack__event() */
+			thread_stack__free(thread);
+			thread->ts = thread_stack__new(thread, crp);
+			if (!thread->ts)
+				return -ENOMEM;
+			ts = thread->ts;
+			ts->comm = comm;
+		}
+	} else {
+		thread->ts = thread_stack__new(thread, crp);
+		if (!thread->ts)
+			return -ENOMEM;
+		ts = thread->ts;
+		ts->comm = comm;
+	}
+
+	/* Flush stack on exec */
+	if (ts->comm != comm && thread->pid_ == thread->tid) {
+		err = thread_stack__flush(thread, ts);
+		if (err)
+			return err;
+		ts->comm = comm;
+	}
+
+	/* If the stack is empty, put the current symbol on the stack */
+	if (!ts->cnt) {
+		err = thread_stack__bottom(thread, ts, sample, from_al, to_al,
+					   ref);
+		if (err)
+			return err;
+	}
+
+	ts->branch_count += 1;
+	ts->last_time = sample->time;
+
+	if (sample->flags & PERF_IP_FLAG_CALL) {
+		struct call_path_root *cpr = ts->crp->cpr;
+		struct call_path *cp;
+		u64 ret_addr;
+
+		if (!sample->ip || !sample->addr)
+			return 0;
+
+		ret_addr = sample->ip + sample->insn_len;
+		if (ret_addr == sample->addr)
+			return 0; /* Zero-length calls are excluded */
+
+		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
+					to_al->sym, sample->addr,
+					ts->kernel_start);
+		if (!cp)
+			return -ENOMEM;
+		err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
+					    cp, false);
+	} else if (sample->flags & PERF_IP_FLAG_RETURN) {
+		if (!sample->ip || !sample->addr)
+			return 0;
+
+		err = thread_stack__pop_cp(thread, ts, sample->addr,
+					   sample->time, ref, from_al->sym);
+		if (err) {
+			if (err < 0)
+				return err;
+			err = thread_stack__no_call_return(thread, ts, sample,
+							   from_al, to_al, ref);
+		}
+	} else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
+		err = thread_stack__trace_begin(thread, ts, sample->time, ref);
+	} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
+		err = thread_stack__trace_end(ts, sample, ref);
+	}
+
+	return err;
+}
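
For illustration, this is roughly how a consumer drives the new interface: create one call_return_processor up front, feed every branch sample through thread_stack__process(), and receive each completed call/return pair in the callback. The sketch below is not part of the patch; print_call_return() and example_sample() are invented names (the real consumer is the db-export code, added separately), and perf's internal headers (thread.h, event.h, debug.h) are assumed.

/* Hypothetical consumer -- illustration only, not part of this patch */
static int print_call_return(struct call_return *cr, void *data __maybe_unused)
{
	/* Invoked once per completed call/return pair */
	pr_debug("%" PRIu64 "..%" PRIu64 ": %" PRIu64 " branches%s%s\n",
		 cr->call_time, cr->return_time, cr->branch_count,
		 cr->flags & CALL_RETURN_NO_CALL ? " (no call seen)" : "",
		 cr->flags & CALL_RETURN_NO_RETURN ? " (no return seen)" : "");
	return 0;
}

static int example_sample(struct thread *thread, struct comm *comm,
			  struct perf_sample *sample,
			  struct addr_location *from_al,
			  struct addr_location *to_al, u64 ref)
{
	static struct call_return_processor *crp;

	if (!crp) {
		/* One processor, shared by all threads; freeing omitted here */
		crp = call_return_processor__new(print_call_return, NULL);
		if (!crp)
			return -ENOMEM;
	}

	/* Pairs are emitted via the callback as returns (or flushes) occur */
	return thread_stack__process(thread, comm, sample, from_al, to_al,
				     ref, crp);
}

Note that thread_stack__free() and trace discontinuities (thread_stack__set_trace_nr) flush partially-matched entries, so the callback also sees entries flagged CALL_RETURN_NO_RETURN.
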
diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
index 7c41579aec74..b843bbef8ba2 100644
--- a/tools/perf/util/thread-stack.h
+++ b/tools/perf/util/thread-stack.h
@@ -19,14 +19,93 @@
 #include <sys/types.h>
 
 #include <linux/types.h>
+#include <linux/rbtree.h>
 
 struct thread;
+struct comm;
 struct ip_callchain;
+struct symbol;
+struct dso;
+struct call_return_processor;
+struct comm;
+struct perf_sample;
+struct addr_location;
+
+/*
+ * Call/Return flags.
+ *
+ * CALL_RETURN_NO_CALL: 'return' but no matching 'call'
+ * CALL_RETURN_NO_RETURN: 'call' but no matching 'return'
+ */
+enum {
+	CALL_RETURN_NO_CALL	= 1 << 0,
+	CALL_RETURN_NO_RETURN	= 1 << 1,
+};
+
+/**
+ * struct call_return - paired call/return information.
+ * @thread: thread in which call/return occurred
+ * @comm: comm in which call/return occurred
+ * @cp: call path
+ * @call_time: timestamp of call (if known)
+ * @return_time: timestamp of return (if known)
+ * @branch_count: number of branches seen between call and return
+ * @call_ref: external reference to 'call' sample (e.g. db_id)
+ * @return_ref: external reference to 'return' sample (e.g. db_id)
+ * @db_id: id used for db-export
+ * @flags: Call/Return flags
+ */
+struct call_return {
+	struct thread *thread;
+	struct comm *comm;
+	struct call_path *cp;
+	u64 call_time;
+	u64 return_time;
+	u64 branch_count;
+	u64 call_ref;
+	u64 return_ref;
+	u64 db_id;
+	u32 flags;
+};
+
+/**
+ * struct call_path - node in list of calls leading to a function call.
+ * @parent: call path to the parent function call
+ * @sym: symbol of function called
+ * @ip: only if sym is null, the ip of the function
+ * @db_id: id used for db-export
+ * @in_kernel: whether function is in the kernel
+ * @rb_node: node in parent's tree of called functions
+ * @children: tree of call paths of functions called
+ *
+ * In combination with the call_return structure, the call_path structure
+ * defines a context-sensitive call-graph.
+ */
+struct call_path {
+	struct call_path *parent;
+	struct symbol *sym;
+	u64 ip;
+	u64 db_id;
+	bool in_kernel;
+	struct rb_node rb_node;
+	struct rb_root children;
+};
 
 int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 			u64 to_ip, u16 insn_len, u64 trace_nr);
+void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr);
 void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
 			  size_t sz, u64 ip);
 void thread_stack__free(struct thread *thread);
 
+struct call_return_processor *
+call_return_processor__new(int (*process)(struct call_return *cr, void *data),
+			   void *data);
+void call_return_processor__free(struct call_return_processor *crp);
+int thread_stack__process(struct thread *thread, struct comm *comm,
+			  struct perf_sample *sample,
+			  struct addr_location *from_al,
+			  struct addr_location *to_al, u64 ref,
+			  struct call_return_processor *crp);
+
 #endif
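
As an aside on the allocator arithmetic above: call_path__new() carves call paths out of 256-entry blocks, appending a new block only when cpr->next reaches cpr->sz, and the low CALL_PATH_BLOCK_SHIFT bits of next select the slot within the most recent block. A standalone sketch of the same index math, reusing the patch's defines but otherwise not perf code:

#include <stdio.h>
#include <stddef.h>

#define CALL_PATH_BLOCK_SHIFT 8
#define CALL_PATH_BLOCK_SIZE (1 << CALL_PATH_BLOCK_SHIFT)
#define CALL_PATH_BLOCK_MASK (CALL_PATH_BLOCK_SIZE - 1)

int main(void)
{
	size_t next, sz = 0, blocks = 0;

	for (next = 0; next < 600; next++) {
		if (next >= sz) {	/* the "cpr->next < cpr->sz" test failing */
			sz += CALL_PATH_BLOCK_SIZE;	/* would zalloc() a new block */
			blocks++;
		}
	}
	/* 600 call paths need ceil(600 / 256) = 3 blocks; the 600th path
	 * (next == 599) lands in slot 599 & 0xff == 87 of the last block.
	 */
	printf("%zu blocks, last slot %zu\n", blocks,
	       (size_t)(599 & CALL_PATH_BLOCK_MASK));
	return 0;
}

Because blocks are only released by call_path_root__free(), call_path pointers stay valid for the lifetime of the processor, which is what lets thread_stack_entry and call_return hold bare struct call_path pointers.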