 Documentation/sysctl/kernel.txt                      |  14
 MAINTAINERS                                          |   1
 arch/arc/kernel/perf_event.c                         |   6
 arch/arm/kernel/perf_callchain.c                     |  10
 arch/arm64/kernel/perf_callchain.c                   |  14
 arch/metag/kernel/perf_callchain.c                   |  10
 arch/mips/kernel/perf_event.c                        |  12
 arch/powerpc/perf/callchain.c                        |  20
 arch/s390/kernel/perf_event.c                        |   4
 arch/sh/kernel/perf_callchain.c                      |   4
 arch/sparc/kernel/perf_event.c                       |  14
 arch/tile/kernel/perf_event.c                        |   6
 arch/x86/events/core.c                               |  14
 arch/x86/events/intel/p4.c                           |   2
 arch/x86/events/intel/uncore.c                       |   2
 arch/xtensa/kernel/perf_event.c                      |  10
 include/linux/perf_event.h                           |  34
 include/uapi/linux/perf_event.h                      |   1
 kernel/bpf/stackmap.c                                |   3
 kernel/events/callchain.c                            |  36
 kernel/sysctl.c                                      |  11
 tools/perf/Documentation/perf-report.txt             |   5
 tools/perf/Documentation/perf-script.txt             |   2
 tools/perf/Documentation/perf-trace.txt              |   3
 tools/perf/builtin-annotate.c                        |   5
 tools/perf/builtin-buildid-cache.c                   |   8
 tools/perf/builtin-diff.c                            |   5
 tools/perf/builtin-record.c                          |  81
 tools/perf/builtin-report.c                          |   7
 tools/perf/builtin-script.c                          |   7
 tools/perf/builtin-stat.c                            |  22
 tools/perf/builtin-timechart.c                       |   5
 tools/perf/builtin-top.c                             |   6
 tools/perf/builtin-trace.c                           | 274
 tools/perf/perf.c                                    |   3
 tools/perf/util/annotate.c                           |  32
 tools/perf/util/build-id.c                           |   2
 tools/perf/util/db-export.c                          |   3
 tools/perf/util/dso.c                                |   7
 tools/perf/util/evlist.c                             |  34
 tools/perf/util/evlist.h                             |   4
 tools/perf/util/evsel.c                              |  13
 tools/perf/util/evsel.h                              |   1
 tools/perf/util/hist.c                               |   9
 tools/perf/util/hist.h                               |   2
 tools/perf/util/machine.c                            |  35
 tools/perf/util/machine.h                            |   1
 tools/perf/util/scripting-engines/trace-event-perl.c |   3
 tools/perf/util/sort.c                               |  84
 tools/perf/util/sort.h                               |   2
 tools/perf/util/stat-shadow.c                        |   8
 tools/perf/util/symbol.c                             |  33
 tools/perf/util/symbol.h                             |   7
 tools/perf/util/top.h                                |   1
 tools/perf/util/util.c                               |   3
 tools/perf/util/util.h                               |   3
 56 files changed, 618 insertions(+), 330 deletions(-)
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index daabdd7ee543..a3683ce2a2f3 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -61,6 +61,7 @@ show up in /proc/sys/kernel:
 - perf_cpu_time_max_percent
 - perf_event_paranoid
 - perf_event_max_stack
+- perf_event_max_contexts_per_stack
 - pid_max
 - powersave-nap               [ PPC only ]
 - printk
@@ -668,6 +669,19 @@ The default value is 127.
 
 ==============================================================
 
+perf_event_max_contexts_per_stack:
+
+Controls maximum number of stack frame context entries for
+(attr.sample_type & PERF_SAMPLE_CALLCHAIN) configured events, for
+instance, when using 'perf record -g' or 'perf trace --call-graph fp'.
+
+This can only be done when no events are in use that have callchains
+enabled, otherwise writing to this file will return -EBUSY.
+
+The default value is 8.
+
+==============================================================
+
 pid_max:
 
 PID allocation wrap value.  When the kernel's next PID value
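
For reference, both limits are exposed as plain integer sysctls under /proc/sys/kernel, with the defaults documented above (127 frames, 8 context entries). A minimal user-space sketch, not part of this patch, that simply reads the two files:

/* sketch only: read the callchain limits documented above */
#include <stdio.h>

static int read_sysctl(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("perf_event_max_stack: %d\n",
	       read_sysctl("/proc/sys/kernel/perf_event_max_stack"));
	printf("perf_event_max_contexts_per_stack: %d\n",
	       read_sysctl("/proc/sys/kernel/perf_event_max_contexts_per_stack"));
	return 0;
}
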
diff --git a/MAINTAINERS b/MAINTAINERS
index 7fb2603e3a95..ab7e05d328c1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8881,6 +8881,7 @@ F: arch/*/kernel/*/perf_event*.c
 F:	arch/*/kernel/*/*/perf_event*.c
 F:	arch/*/include/asm/perf_event.h
 F:	arch/*/kernel/perf_callchain.c
+F:	arch/*/events/*
 F:	tools/perf/
 
 PERSONALITY HANDLING
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 8b134cfe5e1f..6fd48021324b 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -48,7 +48,7 @@ struct arc_callchain_trace {
 static int callchain_trace(unsigned int addr, void *data)
 {
 	struct arc_callchain_trace *ctrl = data;
-	struct perf_callchain_entry *entry = ctrl->perf_stuff;
+	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
 	perf_callchain_store(entry, addr);
 
 	if (ctrl->depth++ < 3)
@@ -58,7 +58,7 @@ static int callchain_trace(unsigned int addr, void *data)
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	struct arc_callchain_trace ctrl = {
 		.depth = 0,
@@ -69,7 +69,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	/*
 	 * User stack can't be unwound trivially with kernel dwarf unwinder
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index 27563befa8a2..22bf1f64d99a 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -31,7 +31,7 @@ struct frame_tail {
31 */ 31 */
32static struct frame_tail __user * 32static struct frame_tail __user *
33user_backtrace(struct frame_tail __user *tail, 33user_backtrace(struct frame_tail __user *tail,
34 struct perf_callchain_entry *entry) 34 struct perf_callchain_entry_ctx *entry)
35{ 35{
36 struct frame_tail buftail; 36 struct frame_tail buftail;
37 unsigned long err; 37 unsigned long err;
@@ -59,7 +59,7 @@ user_backtrace(struct frame_tail __user *tail,
59} 59}
60 60
61void 61void
62perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 62perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
63{ 63{
64 struct frame_tail __user *tail; 64 struct frame_tail __user *tail;
65 65
@@ -75,7 +75,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
75 75
76 tail = (struct frame_tail __user *)regs->ARM_fp - 1; 76 tail = (struct frame_tail __user *)regs->ARM_fp - 1;
77 77
78 while ((entry->nr < sysctl_perf_event_max_stack) && 78 while ((entry->nr < entry->max_stack) &&
79 tail && !((unsigned long)tail & 0x3)) 79 tail && !((unsigned long)tail & 0x3))
80 tail = user_backtrace(tail, entry); 80 tail = user_backtrace(tail, entry);
81} 81}
@@ -89,13 +89,13 @@ static int
89callchain_trace(struct stackframe *fr, 89callchain_trace(struct stackframe *fr,
90 void *data) 90 void *data)
91{ 91{
92 struct perf_callchain_entry *entry = data; 92 struct perf_callchain_entry_ctx *entry = data;
93 perf_callchain_store(entry, fr->pc); 93 perf_callchain_store(entry, fr->pc);
94 return 0; 94 return 0;
95} 95}
96 96
97void 97void
98perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) 98perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
99{ 99{
100 struct stackframe fr; 100 struct stackframe fr;
101 101
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 32c3c6e70119..713ca824f266 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -31,7 +31,7 @@ struct frame_tail {
31 */ 31 */
32static struct frame_tail __user * 32static struct frame_tail __user *
33user_backtrace(struct frame_tail __user *tail, 33user_backtrace(struct frame_tail __user *tail,
34 struct perf_callchain_entry *entry) 34 struct perf_callchain_entry_ctx *entry)
35{ 35{
36 struct frame_tail buftail; 36 struct frame_tail buftail;
37 unsigned long err; 37 unsigned long err;
@@ -76,7 +76,7 @@ struct compat_frame_tail {
76 76
77static struct compat_frame_tail __user * 77static struct compat_frame_tail __user *
78compat_user_backtrace(struct compat_frame_tail __user *tail, 78compat_user_backtrace(struct compat_frame_tail __user *tail,
79 struct perf_callchain_entry *entry) 79 struct perf_callchain_entry_ctx *entry)
80{ 80{
81 struct compat_frame_tail buftail; 81 struct compat_frame_tail buftail;
82 unsigned long err; 82 unsigned long err;
@@ -106,7 +106,7 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
106} 106}
107#endif /* CONFIG_COMPAT */ 107#endif /* CONFIG_COMPAT */
108 108
109void perf_callchain_user(struct perf_callchain_entry *entry, 109void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
110 struct pt_regs *regs) 110 struct pt_regs *regs)
111{ 111{
112 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { 112 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
@@ -122,7 +122,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
122 122
123 tail = (struct frame_tail __user *)regs->regs[29]; 123 tail = (struct frame_tail __user *)regs->regs[29];
124 124
125 while (entry->nr < sysctl_perf_event_max_stack && 125 while (entry->nr < entry->max_stack &&
126 tail && !((unsigned long)tail & 0xf)) 126 tail && !((unsigned long)tail & 0xf))
127 tail = user_backtrace(tail, entry); 127 tail = user_backtrace(tail, entry);
128 } else { 128 } else {
@@ -132,7 +132,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
132 132
133 tail = (struct compat_frame_tail __user *)regs->compat_fp - 1; 133 tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
134 134
135 while ((entry->nr < sysctl_perf_event_max_stack) && 135 while ((entry->nr < entry->max_stack) &&
136 tail && !((unsigned long)tail & 0x3)) 136 tail && !((unsigned long)tail & 0x3))
137 tail = compat_user_backtrace(tail, entry); 137 tail = compat_user_backtrace(tail, entry);
138#endif 138#endif
@@ -146,12 +146,12 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
146 */ 146 */
147static int callchain_trace(struct stackframe *frame, void *data) 147static int callchain_trace(struct stackframe *frame, void *data)
148{ 148{
149 struct perf_callchain_entry *entry = data; 149 struct perf_callchain_entry_ctx *entry = data;
150 perf_callchain_store(entry, frame->pc); 150 perf_callchain_store(entry, frame->pc);
151 return 0; 151 return 0;
152} 152}
153 153
154void perf_callchain_kernel(struct perf_callchain_entry *entry, 154void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
155 struct pt_regs *regs) 155 struct pt_regs *regs)
156{ 156{
157 struct stackframe frame; 157 struct stackframe frame;
diff --git a/arch/metag/kernel/perf_callchain.c b/arch/metag/kernel/perf_callchain.c
index 252abc12a5a3..3e8e048040df 100644
--- a/arch/metag/kernel/perf_callchain.c
+++ b/arch/metag/kernel/perf_callchain.c
@@ -29,7 +29,7 @@ static bool is_valid_call(unsigned long calladdr)
29 29
30static struct metag_frame __user * 30static struct metag_frame __user *
31user_backtrace(struct metag_frame __user *user_frame, 31user_backtrace(struct metag_frame __user *user_frame,
32 struct perf_callchain_entry *entry) 32 struct perf_callchain_entry_ctx *entry)
33{ 33{
34 struct metag_frame frame; 34 struct metag_frame frame;
35 unsigned long calladdr; 35 unsigned long calladdr;
@@ -56,7 +56,7 @@ user_backtrace(struct metag_frame __user *user_frame,
56} 56}
57 57
58void 58void
59perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 59perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
60{ 60{
61 unsigned long sp = regs->ctx.AX[0].U0; 61 unsigned long sp = regs->ctx.AX[0].U0;
62 struct metag_frame __user *frame; 62 struct metag_frame __user *frame;
@@ -65,7 +65,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
65 65
66 --frame; 66 --frame;
67 67
68 while ((entry->nr < sysctl_perf_event_max_stack) && frame) 68 while ((entry->nr < entry->max_stack) && frame)
69 frame = user_backtrace(frame, entry); 69 frame = user_backtrace(frame, entry);
70} 70}
71 71
@@ -78,13 +78,13 @@ static int
78callchain_trace(struct stackframe *fr, 78callchain_trace(struct stackframe *fr,
79 void *data) 79 void *data)
80{ 80{
81 struct perf_callchain_entry *entry = data; 81 struct perf_callchain_entry_ctx *entry = data;
82 perf_callchain_store(entry, fr->pc); 82 perf_callchain_store(entry, fr->pc);
83 return 0; 83 return 0;
84} 84}
85 85
86void 86void
87perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) 87perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
88{ 88{
89 struct stackframe fr; 89 struct stackframe fr;
90 90
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 5021c546ad07..d64056e0bb56 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -25,8 +25,8 @@
25 * the user stack callchains, we will add it here. 25 * the user stack callchains, we will add it here.
26 */ 26 */
27 27
28static void save_raw_perf_callchain(struct perf_callchain_entry *entry, 28static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
29 unsigned long reg29) 29 unsigned long reg29)
30{ 30{
31 unsigned long *sp = (unsigned long *)reg29; 31 unsigned long *sp = (unsigned long *)reg29;
32 unsigned long addr; 32 unsigned long addr;
@@ -35,14 +35,14 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
35 addr = *sp++; 35 addr = *sp++;
36 if (__kernel_text_address(addr)) { 36 if (__kernel_text_address(addr)) {
37 perf_callchain_store(entry, addr); 37 perf_callchain_store(entry, addr);
38 if (entry->nr >= sysctl_perf_event_max_stack) 38 if (entry->nr >= entry->max_stack)
39 break; 39 break;
40 } 40 }
41 } 41 }
42} 42}
43 43
44void perf_callchain_kernel(struct perf_callchain_entry *entry, 44void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
45 struct pt_regs *regs) 45 struct pt_regs *regs)
46{ 46{
47 unsigned long sp = regs->regs[29]; 47 unsigned long sp = regs->regs[29];
48#ifdef CONFIG_KALLSYMS 48#ifdef CONFIG_KALLSYMS
@@ -59,7 +59,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
59 } 59 }
60 do { 60 do {
61 perf_callchain_store(entry, pc); 61 perf_callchain_store(entry, pc);
62 if (entry->nr >= sysctl_perf_event_max_stack) 62 if (entry->nr >= entry->max_stack)
63 break; 63 break;
64 pc = unwind_stack(current, &sp, pc, &ra); 64 pc = unwind_stack(current, &sp, pc, &ra);
65 } while (pc); 65 } while (pc);
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 26d37e6f924e..0fc26714780a 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -47,7 +47,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
47} 47}
48 48
49void 49void
50perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) 50perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
51{ 51{
52 unsigned long sp, next_sp; 52 unsigned long sp, next_sp;
53 unsigned long next_ip; 53 unsigned long next_ip;
@@ -76,7 +76,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
76 next_ip = regs->nip; 76 next_ip = regs->nip;
77 lr = regs->link; 77 lr = regs->link;
78 level = 0; 78 level = 0;
79 perf_callchain_store(entry, PERF_CONTEXT_KERNEL); 79 perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);
80 80
81 } else { 81 } else {
82 if (level == 0) 82 if (level == 0)
@@ -232,7 +232,7 @@ static int sane_signal_64_frame(unsigned long sp)
232 puc == (unsigned long) &sf->uc; 232 puc == (unsigned long) &sf->uc;
233} 233}
234 234
235static void perf_callchain_user_64(struct perf_callchain_entry *entry, 235static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
236 struct pt_regs *regs) 236 struct pt_regs *regs)
237{ 237{
238 unsigned long sp, next_sp; 238 unsigned long sp, next_sp;
@@ -247,7 +247,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
247 sp = regs->gpr[1]; 247 sp = regs->gpr[1];
248 perf_callchain_store(entry, next_ip); 248 perf_callchain_store(entry, next_ip);
249 249
250 while (entry->nr < sysctl_perf_event_max_stack) { 250 while (entry->nr < entry->max_stack) {
251 fp = (unsigned long __user *) sp; 251 fp = (unsigned long __user *) sp;
252 if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp)) 252 if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
253 return; 253 return;
@@ -274,7 +274,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
274 read_user_stack_64(&uregs[PT_R1], &sp)) 274 read_user_stack_64(&uregs[PT_R1], &sp))
275 return; 275 return;
276 level = 0; 276 level = 0;
277 perf_callchain_store(entry, PERF_CONTEXT_USER); 277 perf_callchain_store_context(entry, PERF_CONTEXT_USER);
278 perf_callchain_store(entry, next_ip); 278 perf_callchain_store(entry, next_ip);
279 continue; 279 continue;
280 } 280 }
@@ -319,7 +319,7 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
319 return rc; 319 return rc;
320} 320}
321 321
322static inline void perf_callchain_user_64(struct perf_callchain_entry *entry, 322static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
323 struct pt_regs *regs) 323 struct pt_regs *regs)
324{ 324{
325} 325}
@@ -439,7 +439,7 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
439 return mctx->mc_gregs; 439 return mctx->mc_gregs;
440} 440}
441 441
442static void perf_callchain_user_32(struct perf_callchain_entry *entry, 442static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
443 struct pt_regs *regs) 443 struct pt_regs *regs)
444{ 444{
445 unsigned int sp, next_sp; 445 unsigned int sp, next_sp;
@@ -453,7 +453,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
453 sp = regs->gpr[1]; 453 sp = regs->gpr[1];
454 perf_callchain_store(entry, next_ip); 454 perf_callchain_store(entry, next_ip);
455 455
456 while (entry->nr < sysctl_perf_event_max_stack) { 456 while (entry->nr < entry->max_stack) {
457 fp = (unsigned int __user *) (unsigned long) sp; 457 fp = (unsigned int __user *) (unsigned long) sp;
458 if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp)) 458 if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
459 return; 459 return;
@@ -473,7 +473,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
473 read_user_stack_32(&uregs[PT_R1], &sp)) 473 read_user_stack_32(&uregs[PT_R1], &sp))
474 return; 474 return;
475 level = 0; 475 level = 0;
476 perf_callchain_store(entry, PERF_CONTEXT_USER); 476 perf_callchain_store_context(entry, PERF_CONTEXT_USER);
477 perf_callchain_store(entry, next_ip); 477 perf_callchain_store(entry, next_ip);
478 continue; 478 continue;
479 } 479 }
@@ -487,7 +487,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
487} 487}
488 488
489void 489void
490perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 490perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
491{ 491{
492 if (current_is_64bit()) 492 if (current_is_64bit())
493 perf_callchain_user_64(entry, regs); 493 perf_callchain_user_64(entry, regs);
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index c3e4099b60a5..87035fa58bbe 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -224,13 +224,13 @@ arch_initcall(service_level_perf_register);
 
 static int __perf_callchain_kernel(void *data, unsigned long address)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 
 	perf_callchain_store(entry, address);
 	return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 			   struct pt_regs *regs)
 {
 	if (user_mode(regs))
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index cc80b614b5fa..fa2c0cd23eaa 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -21,7 +21,7 @@ static int callchain_stack(void *data, char *name)
 
 static void callchain_address(void *data, unsigned long addr, int reliable)
 {
-	struct perf_callchain_entry *entry = data;
+	struct perf_callchain_entry_ctx *entry = data;
 
 	if (reliable)
 		perf_callchain_store(entry, addr);
@@ -33,7 +33,7 @@ static const struct stacktrace_ops callchain_ops = {
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
 	perf_callchain_store(entry, regs->pc);
 
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index a4b8b5aed21c..710f3278d448 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1711,7 +1711,7 @@ static int __init init_hw_perf_events(void)
1711} 1711}
1712pure_initcall(init_hw_perf_events); 1712pure_initcall(init_hw_perf_events);
1713 1713
1714void perf_callchain_kernel(struct perf_callchain_entry *entry, 1714void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
1715 struct pt_regs *regs) 1715 struct pt_regs *regs)
1716{ 1716{
1717 unsigned long ksp, fp; 1717 unsigned long ksp, fp;
@@ -1756,7 +1756,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
1756 } 1756 }
1757 } 1757 }
1758#endif 1758#endif
1759 } while (entry->nr < sysctl_perf_event_max_stack); 1759 } while (entry->nr < entry->max_stack);
1760} 1760}
1761 1761
1762static inline int 1762static inline int
@@ -1769,7 +1769,7 @@ valid_user_frame(const void __user *fp, unsigned long size)
1769 return (__range_not_ok(fp, size, TASK_SIZE) == 0); 1769 return (__range_not_ok(fp, size, TASK_SIZE) == 0);
1770} 1770}
1771 1771
1772static void perf_callchain_user_64(struct perf_callchain_entry *entry, 1772static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
1773 struct pt_regs *regs) 1773 struct pt_regs *regs)
1774{ 1774{
1775 unsigned long ufp; 1775 unsigned long ufp;
@@ -1790,10 +1790,10 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
1790 pc = sf.callers_pc; 1790 pc = sf.callers_pc;
1791 ufp = (unsigned long)sf.fp + STACK_BIAS; 1791 ufp = (unsigned long)sf.fp + STACK_BIAS;
1792 perf_callchain_store(entry, pc); 1792 perf_callchain_store(entry, pc);
1793 } while (entry->nr < sysctl_perf_event_max_stack); 1793 } while (entry->nr < entry->max_stack);
1794} 1794}
1795 1795
1796static void perf_callchain_user_32(struct perf_callchain_entry *entry, 1796static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
1797 struct pt_regs *regs) 1797 struct pt_regs *regs)
1798{ 1798{
1799 unsigned long ufp; 1799 unsigned long ufp;
@@ -1822,11 +1822,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
1822 ufp = (unsigned long)sf.fp; 1822 ufp = (unsigned long)sf.fp;
1823 } 1823 }
1824 perf_callchain_store(entry, pc); 1824 perf_callchain_store(entry, pc);
1825 } while (entry->nr < sysctl_perf_event_max_stack); 1825 } while (entry->nr < entry->max_stack);
1826} 1826}
1827 1827
1828void 1828void
1829perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 1829perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
1830{ 1830{
1831 u64 saved_fault_address = current_thread_info()->fault_address; 1831 u64 saved_fault_address = current_thread_info()->fault_address;
1832 u8 saved_fault_code = get_thread_fault_code(); 1832 u8 saved_fault_code = get_thread_fault_code();
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index 8767060d70fb..6394c1ccb68e 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -941,7 +941,7 @@ arch_initcall(init_hw_perf_events);
941/* 941/*
942 * Tile specific backtracing code for perf_events. 942 * Tile specific backtracing code for perf_events.
943 */ 943 */
944static inline void perf_callchain(struct perf_callchain_entry *entry, 944static inline void perf_callchain(struct perf_callchain_entry_ctx *entry,
945 struct pt_regs *regs) 945 struct pt_regs *regs)
946{ 946{
947 struct KBacktraceIterator kbt; 947 struct KBacktraceIterator kbt;
@@ -992,13 +992,13 @@ static inline void perf_callchain(struct perf_callchain_entry *entry,
992 } 992 }
993} 993}
994 994
995void perf_callchain_user(struct perf_callchain_entry *entry, 995void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
996 struct pt_regs *regs) 996 struct pt_regs *regs)
997{ 997{
998 perf_callchain(entry, regs); 998 perf_callchain(entry, regs);
999} 999}
1000 1000
1001void perf_callchain_kernel(struct perf_callchain_entry *entry, 1001void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
1002 struct pt_regs *regs) 1002 struct pt_regs *regs)
1003{ 1003{
1004 perf_callchain(entry, regs); 1004 perf_callchain(entry, regs);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 73a75aa5a66d..33787ee817f0 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2202,7 +2202,7 @@ static int backtrace_stack(void *data, char *name)
2202 2202
2203static int backtrace_address(void *data, unsigned long addr, int reliable) 2203static int backtrace_address(void *data, unsigned long addr, int reliable)
2204{ 2204{
2205 struct perf_callchain_entry *entry = data; 2205 struct perf_callchain_entry_ctx *entry = data;
2206 2206
2207 return perf_callchain_store(entry, addr); 2207 return perf_callchain_store(entry, addr);
2208} 2208}
@@ -2214,7 +2214,7 @@ static const struct stacktrace_ops backtrace_ops = {
2214}; 2214};
2215 2215
2216void 2216void
2217perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) 2217perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
2218{ 2218{
2219 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) { 2219 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2220 /* TODO: We don't support guest os callchain now */ 2220 /* TODO: We don't support guest os callchain now */
@@ -2268,7 +2268,7 @@ static unsigned long get_segment_base(unsigned int segment)
2268#include <asm/compat.h> 2268#include <asm/compat.h>
2269 2269
2270static inline int 2270static inline int
2271perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) 2271perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
2272{ 2272{
2273 /* 32-bit process in 64-bit kernel. */ 2273 /* 32-bit process in 64-bit kernel. */
2274 unsigned long ss_base, cs_base; 2274 unsigned long ss_base, cs_base;
@@ -2283,7 +2283,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
2283 2283
2284 fp = compat_ptr(ss_base + regs->bp); 2284 fp = compat_ptr(ss_base + regs->bp);
2285 pagefault_disable(); 2285 pagefault_disable();
2286 while (entry->nr < sysctl_perf_event_max_stack) { 2286 while (entry->nr < entry->max_stack) {
2287 unsigned long bytes; 2287 unsigned long bytes;
2288 frame.next_frame = 0; 2288 frame.next_frame = 0;
2289 frame.return_address = 0; 2289 frame.return_address = 0;
@@ -2309,14 +2309,14 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
2309} 2309}
2310#else 2310#else
2311static inline int 2311static inline int
2312perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) 2312perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
2313{ 2313{
2314 return 0; 2314 return 0;
2315} 2315}
2316#endif 2316#endif
2317 2317
2318void 2318void
2319perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 2319perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
2320{ 2320{
2321 struct stack_frame frame; 2321 struct stack_frame frame;
2322 const void __user *fp; 2322 const void __user *fp;
@@ -2343,7 +2343,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
2343 return; 2343 return;
2344 2344
2345 pagefault_disable(); 2345 pagefault_disable();
2346 while (entry->nr < sysctl_perf_event_max_stack) { 2346 while (entry->nr < entry->max_stack) {
2347 unsigned long bytes; 2347 unsigned long bytes;
2348 frame.next_frame = NULL; 2348 frame.next_frame = NULL;
2349 frame.return_address = 0; 2349 frame.return_address = 0;
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index 0a5ede187d9c..eb0533558c2b 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -826,7 +826,7 @@ static int p4_hw_config(struct perf_event *event)
826 * Clear bits we reserve to be managed by kernel itself 826 * Clear bits we reserve to be managed by kernel itself
827 * and never allowed from a user space 827 * and never allowed from a user space
828 */ 828 */
829 event->attr.config &= P4_CONFIG_MASK; 829 event->attr.config &= P4_CONFIG_MASK;
830 830
831 rc = p4_validate_raw_event(event); 831 rc = p4_validate_raw_event(event);
832 if (rc) 832 if (rc)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 16c178916412..fce74062d981 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -891,7 +891,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
 		return -ENODEV;
 
 	pkg = topology_phys_to_logical_pkg(phys_id);
-	if (WARN_ON_ONCE(pkg < 0))
+	if (pkg < 0)
 		return -EINVAL;
 
 	if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index a6b00b3af429..ef90479e0397 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -323,23 +323,23 @@ static void xtensa_pmu_read(struct perf_event *event)
323 323
324static int callchain_trace(struct stackframe *frame, void *data) 324static int callchain_trace(struct stackframe *frame, void *data)
325{ 325{
326 struct perf_callchain_entry *entry = data; 326 struct perf_callchain_entry_ctx *entry = data;
327 327
328 perf_callchain_store(entry, frame->pc); 328 perf_callchain_store(entry, frame->pc);
329 return 0; 329 return 0;
330} 330}
331 331
332void perf_callchain_kernel(struct perf_callchain_entry *entry, 332void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
333 struct pt_regs *regs) 333 struct pt_regs *regs)
334{ 334{
335 xtensa_backtrace_kernel(regs, sysctl_perf_event_max_stack, 335 xtensa_backtrace_kernel(regs, entry->max_stack,
336 callchain_trace, NULL, entry); 336 callchain_trace, NULL, entry);
337} 337}
338 338
339void perf_callchain_user(struct perf_callchain_entry *entry, 339void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
340 struct pt_regs *regs) 340 struct pt_regs *regs)
341{ 341{
342 xtensa_backtrace_user(regs, sysctl_perf_event_max_stack, 342 xtensa_backtrace_user(regs, entry->max_stack,
343 callchain_trace, entry); 343 callchain_trace, entry);
344} 344}
345 345
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 44f33834ad78..1a827cecd62f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -61,6 +61,14 @@ struct perf_callchain_entry {
 	__u64				ip[0];	/* /proc/sys/kernel/perf_event_max_stack */
 };
 
+struct perf_callchain_entry_ctx {
+	struct perf_callchain_entry *entry;
+	u32			    max_stack;
+	u32			    nr;
+	short			    contexts;
+	bool			    contexts_maxed;
+};
+
 struct perf_raw_record {
 	u32				size;
 	void				*data;
@@ -1061,20 +1069,36 @@ extern void perf_event_fork(struct task_struct *tsk);
 /* Callchains */
 DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
 
-extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-		   bool crosstask, bool add_mark);
+		   u32 max_stack, bool crosstask, bool add_mark);
 extern int get_callchain_buffers(void);
 extern void put_callchain_buffers(void);
 
 extern int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_contexts_per_stack;
+
+static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
+{
+	if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
+		struct perf_callchain_entry *entry = ctx->entry;
+		entry->ip[entry->nr++] = ip;
+		++ctx->contexts;
+		return 0;
+	} else {
+		ctx->contexts_maxed = true;
+		return -1; /* no more room, stop walking the stack */
+	}
+}
 
-static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
 {
-	if (entry->nr < sysctl_perf_event_max_stack) {
+	if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
+		struct perf_callchain_entry *entry = ctx->entry;
 		entry->ip[entry->nr++] = ip;
+		++ctx->nr;
 		return 0;
 	} else {
 		return -1; /* no more room, stop walking the stack */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 43fc8d213472..36ce552cf6a9 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -862,6 +862,7 @@ enum perf_event_type {
 };
 
 #define PERF_MAX_STACK_DEPTH		127
+#define PERF_MAX_CONTEXTS_PER_STACK	  8
 
 enum perf_callchain_context {
 	PERF_CONTEXT_HV			= (__u64)-32,
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index c8ee35287bfe..080a2dfb5800 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -136,7 +136,8 @@ u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
 			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
 		return -EINVAL;
 
-	trace = get_perf_callchain(regs, init_nr, kernel, user, false, false);
+	trace = get_perf_callchain(regs, init_nr, kernel, user,
+				   sysctl_perf_event_max_stack, false, false);
 
 	if (unlikely(!trace))
 		/* couldn't fetch the stack trace */
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index b9325e7dcba1..179ef4640964 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -19,11 +19,13 @@ struct callchain_cpus_entries {
 };
 
 int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;
 
 static inline size_t perf_callchain_entry__sizeof(void)
 {
 	return (sizeof(struct perf_callchain_entry) +
-		sizeof(__u64) * sysctl_perf_event_max_stack);
+		sizeof(__u64) * (sysctl_perf_event_max_stack +
+				 sysctl_perf_event_max_contexts_per_stack));
 }
 
 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
@@ -32,12 +34,12 @@ static DEFINE_MUTEX(callchain_mutex);
 static struct callchain_cpus_entries *callchain_cpus_entries;
 
 
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 				  struct pt_regs *regs)
 {
 }
 
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 				struct pt_regs *regs)
 {
 }
@@ -176,14 +178,15 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	if (!kernel && !user)
 		return NULL;
 
-	return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
+	return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true);
 }
 
 struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
-		   bool crosstask, bool add_mark)
+		   u32 max_stack, bool crosstask, bool add_mark)
 {
 	struct perf_callchain_entry *entry;
+	struct perf_callchain_entry_ctx ctx;
 	int rctx;
 
 	entry = get_callchain_entry(&rctx);
@@ -193,12 +196,16 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 	if (!entry)
 		goto exit_put;
 
-	entry->nr = init_nr;
+	ctx.entry = entry;
+	ctx.max_stack = max_stack;
+	ctx.nr = entry->nr = init_nr;
+	ctx.contexts = 0;
+	ctx.contexts_maxed = false;
 
 	if (kernel && !user_mode(regs)) {
 		if (add_mark)
-			perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
-		perf_callchain_kernel(entry, regs);
+			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
+		perf_callchain_kernel(&ctx, regs);
 	}
 
 	if (user) {
@@ -214,8 +221,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 			goto exit_put;
 
 		if (add_mark)
-			perf_callchain_store(entry, PERF_CONTEXT_USER);
-		perf_callchain_user(entry, regs);
+			perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
+		perf_callchain_user(&ctx, regs);
 		}
 	}
 
@@ -225,10 +232,15 @@ exit_put:
 	return entry;
 }
 
+/*
+ * Used for sysctl_perf_event_max_stack and
+ * sysctl_perf_event_max_contexts_per_stack.
+ */
 int perf_event_max_stack_handler(struct ctl_table *table, int write,
 				 void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int new_value = sysctl_perf_event_max_stack, ret;
+	int *value = table->data;
+	int new_value = *value, ret;
 	struct ctl_table new_table = *table;
 
 	new_table.data = &new_value;
@@ -240,7 +252,7 @@ int perf_event_max_stack_handler(struct ctl_table *table, int write,
 	if (atomic_read(&nr_callchain_events))
 		ret = -EBUSY;
 	else
-		sysctl_perf_event_max_stack = new_value;
+		*value = new_value;
 
 	mutex_unlock(&callchain_mutex);
 
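
With the context words accounted separately, each per-CPU callchain buffer is now sized for max_stack real frames plus max_contexts_per_stack marker entries (PERF_CONTEXT_KERNEL, PERF_CONTEXT_USER and friends). A standalone sketch, assuming the defaults and mirroring the perf_callchain_entry__sizeof() arithmetic above, of what that works out to:

/* sketch only: per-entry size at the defaults (127 + 8 slots) */
#include <stdio.h>

struct callchain_entry_like {
	unsigned long long nr;
	unsigned long long ip[];	/* frames followed by context markers */
};

int main(void)
{
	int max_stack = 127;		/* PERF_MAX_STACK_DEPTH */
	int max_contexts = 8;		/* PERF_MAX_CONTEXTS_PER_STACK */
	size_t sz = sizeof(struct callchain_entry_like) +
		    sizeof(unsigned long long) * (max_stack + max_contexts);

	printf("%zu bytes per callchain entry\n", sz);	/* 8 + 8 * 135 = 1088 */
	return 0;
}
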
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2effd84d83e3..87b2fc38398b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1149,13 +1149,22 @@ static struct ctl_table kern_table[] = {
 	},
 	{
 		.procname	= "perf_event_max_stack",
-		.data		= NULL, /* filled in by handler */
+		.data		= &sysctl_perf_event_max_stack,
 		.maxlen		= sizeof(sysctl_perf_event_max_stack),
 		.mode		= 0644,
 		.proc_handler	= perf_event_max_stack_handler,
 		.extra1		= &zero,
 		.extra2		= &six_hundred_forty_kb,
 	},
+	{
+		.procname	= "perf_event_max_contexts_per_stack",
+		.data		= &sysctl_perf_event_max_contexts_per_stack,
+		.maxlen		= sizeof(sysctl_perf_event_max_contexts_per_stack),
+		.mode		= 0644,
+		.proc_handler	= perf_event_max_stack_handler,
+		.extra1		= &zero,
+		.extra2		= &one_thousand,
+	},
 #endif
 #ifdef CONFIG_KMEMCHECK
 	{
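
Both table entries point at perf_event_max_stack_handler(), which now writes through table->data and refuses updates while callchain events exist. A hedged user-space sketch of the write path, where the EBUSY failure is the case described in the kernel.txt hunk above:

/* sketch only: raising the context limit; fails with EBUSY while
 * events with callchains are in use */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/perf_event_max_contexts_per_stack";
	int fd = open(path, O_WRONLY);

	if (fd < 0 || write(fd, "16\n", 3) != 3)
		fprintf(stderr, "%s: %s\n", path, strerror(errno));
	if (fd >= 0)
		close(fd);
	return 0;
}
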
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index ebaf849e30ef..9cbddc290aff 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -103,12 +103,13 @@ OPTIONS
 
 	If --branch-stack option is used, following sort keys are also
 	available:
-	dso_from, dso_to, symbol_from, symbol_to, mispredict.
 
 	- dso_from: name of library or module branched from
 	- dso_to: name of library or module branched to
 	- symbol_from: name of function branched from
 	- symbol_to: name of function branched to
+	- srcline_from: source file and line branched from
+	- srcline_to: source file and line branched to
 	- mispredict: "N" for predicted branch, "Y" for mispredicted branch
 	- in_tx: branch in TSX transaction
 	- abort: TSX transaction abort.
@@ -248,7 +249,7 @@ OPTIONS
 	Note that when using the --itrace option the synthesized callchain size
 	will override this value if the synthesized callchain size is bigger.
 
-	Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+	Default: 127
 
 -G::
 --inverted::
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index a856a1095893..4fc44c75263f 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -267,7 +267,7 @@ include::itrace.txt[]
 	Note that when using the --itrace option the synthesized callchain size
 	will override this value if the synthesized callchain size is bigger.
 
-	Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+	Default: 127
 
 --ns::
 	Use 9 decimal places when displaying time (i.e. show the nanoseconds)
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 6afe20121bc0..1ab0782369b1 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -143,7 +143,8 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
 	Implies '--call-graph dwarf' when --call-graph not present on the
 	command line, on systems where DWARF unwinding was built in.
 
-	Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+	Default: /proc/sys/kernel/perf_event_max_stack when present for
+		 live sessions (without --input/-i), 127 otherwise.
 
 --min-stack::
 	Set the stack depth limit when parsing the callchain, anything
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 814158393656..25c81734a950 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -324,8 +324,9 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
324 OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing, 324 OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
325 "Skip symbols that cannot be annotated"), 325 "Skip symbols that cannot be annotated"),
326 OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"), 326 OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
327 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 327 OPT_CALLBACK(0, "symfs", NULL, "directory",
328 "Look for files with symbols relative to this directory"), 328 "Look for files with symbols relative to this directory",
329 symbol__config_symfs),
329 OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src, 330 OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
330 "Interleave source code with assembly code (default)"), 331 "Interleave source code with assembly code (default)"),
331 OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw, 332 OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 632efc6b79a0..d75bded21fe0 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -119,8 +119,8 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
119 if (build_id_cache__kcore_buildid(from_dir, sbuildid) < 0) 119 if (build_id_cache__kcore_buildid(from_dir, sbuildid) < 0)
120 return -1; 120 return -1;
121 121
122 scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s", 122 scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s",
123 buildid_dir, sbuildid); 123 buildid_dir, DSO__NAME_KCORE, sbuildid);
124 124
125 if (!force && 125 if (!force &&
126 !build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) { 126 !build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {
@@ -131,8 +131,8 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
131 if (build_id_cache__kcore_dir(dir, sizeof(dir))) 131 if (build_id_cache__kcore_dir(dir, sizeof(dir)))
132 return -1; 132 return -1;
133 133
134 scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s/%s", 134 scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s/%s",
135 buildid_dir, sbuildid, dir); 135 buildid_dir, DSO__NAME_KCORE, sbuildid, dir);
136 136
137 if (mkdir_p(to_dir, 0755)) 137 if (mkdir_p(to_dir, 0755))
138 return -1; 138 return -1;
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 9ce354f469dc..f7645a42708e 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -812,8 +812,9 @@ static const struct option options[] = {
812 OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator", 812 OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
813 "separator for columns, no spaces will be added between " 813 "separator for columns, no spaces will be added between "
814 "columns '.' is reserved."), 814 "columns '.' is reserved."),
815 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 815 OPT_CALLBACK(0, "symfs", NULL, "directory",
816 "Look for files with symbols relative to this directory"), 816 "Look for files with symbols relative to this directory",
817 symbol__config_symfs),
817 OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."), 818 OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."),
818 OPT_CALLBACK(0, "percentage", NULL, "relative|absolute", 819 OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
819 "How to display percentage of filtered entries", parse_filter_percentage), 820 "How to display percentage of filtered entries", parse_filter_percentage),
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f3679c44d3f3..dc3fcb597e4c 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -40,6 +40,7 @@
40#include <unistd.h> 40#include <unistd.h>
41#include <sched.h> 41#include <sched.h>
42#include <sys/mman.h> 42#include <sys/mman.h>
43#include <asm/bug.h>
43 44
44 45
45struct record { 46struct record {
@@ -82,27 +83,87 @@ static int process_synthesized_event(struct perf_tool *tool,
82 return record__write(rec, event, event->header.size); 83 return record__write(rec, event, event->header.size);
83} 84}
84 85
86static int
87backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
88{
89 struct perf_event_header *pheader;
90 u64 evt_head = head;
91 int size = mask + 1;
92
93 pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
94 pheader = (struct perf_event_header *)(buf + (head & mask));
95 *start = head;
96 while (true) {
97 if (evt_head - head >= (unsigned int)size) {
98 pr_debug("Finshed reading backward ring buffer: rewind\n");
99 if (evt_head - head > (unsigned int)size)
100 evt_head -= pheader->size;
101 *end = evt_head;
102 return 0;
103 }
104
105 pheader = (struct perf_event_header *)(buf + (evt_head & mask));
106
107 if (pheader->size == 0) {
108 pr_debug("Finshed reading backward ring buffer: get start\n");
109 *end = evt_head;
110 return 0;
111 }
112
113 evt_head += pheader->size;
114 pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
115 }
116 WARN_ONCE(1, "Shouldn't get here\n");
117 return -1;
118}
119
120static int
121rb_find_range(struct perf_evlist *evlist,
122 void *data, int mask, u64 head, u64 old,
123 u64 *start, u64 *end)
124{
125 if (!evlist->backward) {
126 *start = old;
127 *end = head;
128 return 0;
129 }
130
131 return backward_rb_find_range(data, mask, head, start, end);
132}
133
85static int record__mmap_read(struct record *rec, int idx) 134static int record__mmap_read(struct record *rec, int idx)
86{ 135{
87 struct perf_mmap *md = &rec->evlist->mmap[idx]; 136 struct perf_mmap *md = &rec->evlist->mmap[idx];
88 u64 head = perf_mmap__read_head(md); 137 u64 head = perf_mmap__read_head(md);
89 u64 old = md->prev; 138 u64 old = md->prev;
139 u64 end = head, start = old;
90 unsigned char *data = md->base + page_size; 140 unsigned char *data = md->base + page_size;
91 unsigned long size; 141 unsigned long size;
92 void *buf; 142 void *buf;
93 int rc = 0; 143 int rc = 0;
94 144
95 if (old == head) 145 if (rb_find_range(rec->evlist, data, md->mask, head,
146 old, &start, &end))
147 return -1;
148
149 if (start == end)
96 return 0; 150 return 0;
97 151
98 rec->samples++; 152 rec->samples++;
99 153
100 size = head - old; 154 size = end - start;
155 if (size > (unsigned long)(md->mask) + 1) {
156 WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
157
158 md->prev = head;
159 perf_evlist__mmap_consume(rec->evlist, idx);
160 return 0;
161 }
101 162
102 if ((old & md->mask) + size != (head & md->mask)) { 163 if ((start & md->mask) + size != (end & md->mask)) {
103 buf = &data[old & md->mask]; 164 buf = &data[start & md->mask];
104 size = md->mask + 1 - (old & md->mask); 165 size = md->mask + 1 - (start & md->mask);
105 old += size; 166 start += size;
106 167
107 if (record__write(rec, buf, size) < 0) { 168 if (record__write(rec, buf, size) < 0) {
108 rc = -1; 169 rc = -1;
@@ -110,16 +171,16 @@ static int record__mmap_read(struct record *rec, int idx)
110 } 171 }
111 } 172 }
112 173
113 buf = &data[old & md->mask]; 174 buf = &data[start & md->mask];
114 size = head - old; 175 size = end - start;
115 old += size; 176 start += size;
116 177
117 if (record__write(rec, buf, size) < 0) { 178 if (record__write(rec, buf, size) < 0) {
118 rc = -1; 179 rc = -1;
119 goto out; 180 goto out;
120 } 181 }
121 182
122 md->prev = old; 183 md->prev = head;
123 perf_evlist__mmap_consume(rec->evlist, idx); 184 perf_evlist__mmap_consume(rec->evlist, idx);
124out: 185out:
125 return rc; 186 return rc;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 87d40e3c4078..a87cb338bdf1 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -691,7 +691,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
691 .ordered_events = true, 691 .ordered_events = true,
692 .ordering_requires_timestamps = true, 692 .ordering_requires_timestamps = true,
693 }, 693 },
694 .max_stack = sysctl_perf_event_max_stack, 694 .max_stack = PERF_MAX_STACK_DEPTH,
695 .pretty_printing_style = "normal", 695 .pretty_printing_style = "normal",
696 .socket_filter = -1, 696 .socket_filter = -1,
697 }; 697 };
@@ -770,8 +770,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
770 "columns '.' is reserved."), 770 "columns '.' is reserved."),
771 OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved, 771 OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
772 "Only display entries resolved to a symbol"), 772 "Only display entries resolved to a symbol"),
773 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 773 OPT_CALLBACK(0, "symfs", NULL, "directory",
774 "Look for files with symbols relative to this directory"), 774 "Look for files with symbols relative to this directory",
775 symbol__config_symfs),
775 OPT_STRING('C', "cpu", &report.cpu_list, "cpu", 776 OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
776 "list of cpus to profile"), 777 "list of cpus to profile"),
777 OPT_BOOLEAN('I', "show-info", &report.show_full_info, 778 OPT_BOOLEAN('I', "show-info", &report.show_full_info,
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index efca81679bb3..e3ce2f34d3ad 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -2010,8 +2010,9 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
2010 "file", "kallsyms pathname"), 2010 "file", "kallsyms pathname"),
2011 OPT_BOOLEAN('G', "hide-call-graph", &no_callchain, 2011 OPT_BOOLEAN('G', "hide-call-graph", &no_callchain,
2012 "When printing symbols do not display call chain"), 2012 "When printing symbols do not display call chain"),
2013 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 2013 OPT_CALLBACK(0, "symfs", NULL, "directory",
2014 "Look for files with symbols relative to this directory"), 2014 "Look for files with symbols relative to this directory",
2015 symbol__config_symfs),
2015 OPT_CALLBACK('F', "fields", NULL, "str", 2016 OPT_CALLBACK('F', "fields", NULL, "str",
2016 "comma separated output fields prepend with 'type:'. " 2017 "comma separated output fields prepend with 'type:'. "
2017 "Valid types: hw,sw,trace,raw. " 2018 "Valid types: hw,sw,trace,raw. "
@@ -2067,8 +2068,6 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
2067 NULL 2068 NULL
2068 }; 2069 };
2069 2070
2070 scripting_max_stack = sysctl_perf_event_max_stack;
2071
2072 setup_scripting(); 2071 setup_scripting();
2073 2072
2074 argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage, 2073 argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e459b685a4e9..ee7ada78d86f 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -66,6 +66,7 @@
66#include <stdlib.h> 66#include <stdlib.h>
67#include <sys/prctl.h> 67#include <sys/prctl.h>
68#include <locale.h> 68#include <locale.h>
69#include <math.h>
69 70
70#define DEFAULT_SEPARATOR " " 71#define DEFAULT_SEPARATOR " "
71#define CNTR_NOT_SUPPORTED "<not supported>" 72#define CNTR_NOT_SUPPORTED "<not supported>"
@@ -991,12 +992,12 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
991 const char *fmt; 992 const char *fmt;
992 993
993 if (csv_output) { 994 if (csv_output) {
994 fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s"; 995 fmt = floor(sc) != sc ? "%.2f%s" : "%.0f%s";
995 } else { 996 } else {
996 if (big_num) 997 if (big_num)
997 fmt = sc != 1.0 ? "%'18.2f%s" : "%'18.0f%s"; 998 fmt = floor(sc) != sc ? "%'18.2f%s" : "%'18.0f%s";
998 else 999 else
999 fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s"; 1000 fmt = floor(sc) != sc ? "%18.2f%s" : "%18.0f%s";
1000 } 1001 }
1001 1002
1002 aggr_printout(evsel, id, nr); 1003 aggr_printout(evsel, id, nr);
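The abs_printout() hunk above switches from comparing the scale against 1.0 to checking whether it is integral at all, so any fractional scale gets two decimals. The same test in a stand-alone helper, with hypothetical formatting widths:

#include <math.h>
#include <stdio.h>

/* Print a scaled count with decimals only when the scale is
 * non-integral, mirroring the floor(sc) != sc test above. */
static void print_scaled(double avg, double sc)
{
        const char *fmt = floor(sc) != sc ? "%18.2f\n" : "%18.0f\n";

        printf(fmt, avg);
}

/* print_scaled(12345.0, 2.0)  -> "12345"    (right-aligned, no decimals)
 * print_scaled(12345.0, 0.25) -> "12345.00" (two decimals)            */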
@@ -1909,6 +1910,9 @@ static int add_default_attributes(void)
1909 } 1910 }
1910 1911
1911 if (!evsel_list->nr_entries) { 1912 if (!evsel_list->nr_entries) {
1913 if (target__has_cpu(&target))
1914 default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
1915
1912 if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0) 1916 if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
1913 return -1; 1917 return -1;
1914 if (pmu_have_event("cpu", "stalled-cycles-frontend")) { 1918 if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
@@ -2000,7 +2004,7 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
2000 union perf_event *event, 2004 union perf_event *event,
2001 struct perf_session *session) 2005 struct perf_session *session)
2002{ 2006{
2003 struct stat_round_event *round = &event->stat_round; 2007 struct stat_round_event *stat_round = &event->stat_round;
2004 struct perf_evsel *counter; 2008 struct perf_evsel *counter;
2005 struct timespec tsh, *ts = NULL; 2009 struct timespec tsh, *ts = NULL;
2006 const char **argv = session->header.env.cmdline_argv; 2010 const char **argv = session->header.env.cmdline_argv;
@@ -2009,12 +2013,12 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
2009 evlist__for_each(evsel_list, counter) 2013 evlist__for_each(evsel_list, counter)
2010 perf_stat_process_counter(&stat_config, counter); 2014 perf_stat_process_counter(&stat_config, counter);
2011 2015
2012 if (round->type == PERF_STAT_ROUND_TYPE__FINAL) 2016 if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
2013 update_stats(&walltime_nsecs_stats, round->time); 2017 update_stats(&walltime_nsecs_stats, stat_round->time);
2014 2018
2015 if (stat_config.interval && round->time) { 2019 if (stat_config.interval && stat_round->time) {
2016 tsh.tv_sec = round->time / NSECS_PER_SEC; 2020 tsh.tv_sec = stat_round->time / NSECS_PER_SEC;
2017 tsh.tv_nsec = round->time % NSECS_PER_SEC; 2021 tsh.tv_nsec = stat_round->time % NSECS_PER_SEC;
2018 ts = &tsh; 2022 ts = &tsh;
2019 } 2023 }
2020 2024
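The interval path above splits the round timestamp (nanoseconds) into the struct timespec that interval printing expects, using NSECS_PER_SEC. In isolation, the conversion is just:

#include <time.h>
#include <stdint.h>

#define NSECS_PER_SEC 1000000000ULL     /* as used by perf stat */

/* Split a nanosecond timestamp into seconds and nanoseconds. */
static struct timespec ns_to_timespec(uint64_t ns)
{
        struct timespec ts = {
                .tv_sec  = ns / NSECS_PER_SEC,
                .tv_nsec = ns % NSECS_PER_SEC,
        };

        return ts;
}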
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 40cc9bb3506c..733a55422d03 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1945,8 +1945,9 @@ int cmd_timechart(int argc, const char **argv,
1945 OPT_CALLBACK('p', "process", NULL, "process", 1945 OPT_CALLBACK('p', "process", NULL, "process",
1946 "process selector. Pass a pid or process name.", 1946 "process selector. Pass a pid or process name.",
1947 parse_process), 1947 parse_process),
1948 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 1948 OPT_CALLBACK(0, "symfs", NULL, "directory",
1949 "Look for files with symbols relative to this directory"), 1949 "Look for files with symbols relative to this directory",
1950 symbol__config_symfs),
1950 OPT_INTEGER('n', "proc-num", &tchart.proc_num, 1951 OPT_INTEGER('n', "proc-num", &tchart.proc_num,
1951 "min. number of tasks to print"), 1952 "min. number of tasks to print"),
1952 OPT_BOOLEAN('t', "topology", &tchart.topology, 1953 OPT_BOOLEAN('t', "topology", &tchart.topology,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1793da585676..2a6cc254ad0c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -732,7 +732,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
732 if (machine__resolve(machine, &al, sample) < 0) 732 if (machine__resolve(machine, &al, sample) < 0)
733 return; 733 return;
734 734
735 if (!top->kptr_restrict_warned && 735 if (!machine->kptr_restrict_warned &&
736 symbol_conf.kptr_restrict && 736 symbol_conf.kptr_restrict &&
737 al.cpumode == PERF_RECORD_MISC_KERNEL) { 737 al.cpumode == PERF_RECORD_MISC_KERNEL) {
738 ui__warning( 738 ui__warning(
@@ -743,7 +743,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
743 " modules" : ""); 743 " modules" : "");
744 if (use_browser <= 0) 744 if (use_browser <= 0)
745 sleep(5); 745 sleep(5);
746 top->kptr_restrict_warned = true; 746 machine->kptr_restrict_warned = true;
747 } 747 }
748 748
749 if (al.sym == NULL) { 749 if (al.sym == NULL) {
@@ -759,7 +759,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
759 * --hide-kernel-symbols, even if the user specifies an 759 * --hide-kernel-symbols, even if the user specifies an
760 * invalid --vmlinux ;-) 760 * invalid --vmlinux ;-)
761 */ 761 */
762 if (!top->kptr_restrict_warned && !top->vmlinux_warned && 762 if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
763 al.map == machine->vmlinux_maps[MAP__FUNCTION] && 763 al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
764 RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) { 764 RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
765 if (symbol_conf.vmlinux_name) { 765 if (symbol_conf.vmlinux_name) {
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 6e5c325148e4..5c50fe70d6b3 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -576,84 +576,54 @@ static struct syscall_fmt {
576 bool hexret; 576 bool hexret;
577} syscall_fmts[] = { 577} syscall_fmts[] = {
578 { .name = "access", .errmsg = true, 578 { .name = "access", .errmsg = true,
579 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ 579 .arg_scnprintf = { [1] = SCA_ACCMODE, /* mode */ }, },
580 [1] = SCA_ACCMODE, /* mode */ }, },
581 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", }, 580 { .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
582 { .name = "bpf", .errmsg = true, STRARRAY(0, cmd, bpf_cmd), }, 581 { .name = "bpf", .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
583 { .name = "brk", .hexret = true, 582 { .name = "brk", .hexret = true,
584 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, }, 583 .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
585 { .name = "chdir", .errmsg = true, 584 { .name = "chdir", .errmsg = true, },
586 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, }, 585 { .name = "chmod", .errmsg = true, },
587 { .name = "chmod", .errmsg = true, 586 { .name = "chroot", .errmsg = true, },
588 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
589 { .name = "chroot", .errmsg = true,
590 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
591 { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), }, 587 { .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
592 { .name = "clone", .errpid = true, }, 588 { .name = "clone", .errpid = true, },
593 { .name = "close", .errmsg = true, 589 { .name = "close", .errmsg = true,
594 .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, }, 590 .arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
595 { .name = "connect", .errmsg = true, }, 591 { .name = "connect", .errmsg = true, },
596 { .name = "creat", .errmsg = true, 592 { .name = "creat", .errmsg = true, },
597 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 593 { .name = "dup", .errmsg = true, },
598 { .name = "dup", .errmsg = true, 594 { .name = "dup2", .errmsg = true, },
599 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 595 { .name = "dup3", .errmsg = true, },
600 { .name = "dup2", .errmsg = true,
601 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
602 { .name = "dup3", .errmsg = true,
603 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
604 { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), }, 596 { .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
605 { .name = "eventfd2", .errmsg = true, 597 { .name = "eventfd2", .errmsg = true,
606 .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, }, 598 .arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
607 { .name = "faccessat", .errmsg = true, 599 { .name = "faccessat", .errmsg = true, },
608 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 600 { .name = "fadvise64", .errmsg = true, },
609 [1] = SCA_FILENAME, /* filename */ }, }, 601 { .name = "fallocate", .errmsg = true, },
610 { .name = "fadvise64", .errmsg = true, 602 { .name = "fchdir", .errmsg = true, },
611 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 603 { .name = "fchmod", .errmsg = true, },
612 { .name = "fallocate", .errmsg = true,
613 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
614 { .name = "fchdir", .errmsg = true,
615 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
616 { .name = "fchmod", .errmsg = true,
617 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
618 { .name = "fchmodat", .errmsg = true, 604 { .name = "fchmodat", .errmsg = true,
619 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ 605 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
620 [1] = SCA_FILENAME, /* filename */ }, }, 606 { .name = "fchown", .errmsg = true, },
621 { .name = "fchown", .errmsg = true,
622 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
623 { .name = "fchownat", .errmsg = true, 607 { .name = "fchownat", .errmsg = true,
624 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ 608 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
625 [1] = SCA_FILENAME, /* filename */ }, },
626 { .name = "fcntl", .errmsg = true, 609 { .name = "fcntl", .errmsg = true,
627 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 610 .arg_scnprintf = { [1] = SCA_STRARRAY, /* cmd */ },
628 [1] = SCA_STRARRAY, /* cmd */ },
629 .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, }, 611 .arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
630 { .name = "fdatasync", .errmsg = true, 612 { .name = "fdatasync", .errmsg = true, },
631 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
632 { .name = "flock", .errmsg = true, 613 { .name = "flock", .errmsg = true,
633 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 614 .arg_scnprintf = { [1] = SCA_FLOCK, /* cmd */ }, },
634 [1] = SCA_FLOCK, /* cmd */ }, }, 615 { .name = "fsetxattr", .errmsg = true, },
635 { .name = "fsetxattr", .errmsg = true, 616 { .name = "fstat", .errmsg = true, .alias = "newfstat", },
636 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 617 { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
637 { .name = "fstat", .errmsg = true, .alias = "newfstat", 618 { .name = "fstatfs", .errmsg = true, },
638 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 619 { .name = "fsync", .errmsg = true, },
639 { .name = "fstatat", .errmsg = true, .alias = "newfstatat", 620 { .name = "ftruncate", .errmsg = true, },
640 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
641 [1] = SCA_FILENAME, /* filename */ }, },
642 { .name = "fstatfs", .errmsg = true,
643 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
644 { .name = "fsync", .errmsg = true,
645 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
646 { .name = "ftruncate", .errmsg = true,
647 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
648 { .name = "futex", .errmsg = true, 621 { .name = "futex", .errmsg = true,
649 .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, }, 622 .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
650 { .name = "futimesat", .errmsg = true, 623 { .name = "futimesat", .errmsg = true,
651 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ 624 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
652 [1] = SCA_FILENAME, /* filename */ }, }, 625 { .name = "getdents", .errmsg = true, },
653 { .name = "getdents", .errmsg = true, 626 { .name = "getdents64", .errmsg = true, },
654 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
655 { .name = "getdents64", .errmsg = true,
656 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
657 { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), }, 627 { .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
658 { .name = "getpid", .errpid = true, }, 628 { .name = "getpid", .errpid = true, },
659 { .name = "getpgid", .errpid = true, }, 629 { .name = "getpgid", .errpid = true, },
@@ -661,12 +631,10 @@ static struct syscall_fmt {
661 { .name = "getrandom", .errmsg = true, 631 { .name = "getrandom", .errmsg = true,
662 .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, }, 632 .arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, },
663 { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, 633 { .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
664 { .name = "getxattr", .errmsg = true, 634 { .name = "getxattr", .errmsg = true, },
665 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 635 { .name = "inotify_add_watch", .errmsg = true, },
666 { .name = "inotify_add_watch", .errmsg = true,
667 .arg_scnprintf = { [1] = SCA_FILENAME, /* pathname */ }, },
668 { .name = "ioctl", .errmsg = true, 636 { .name = "ioctl", .errmsg = true,
669 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 637 .arg_scnprintf = {
670#if defined(__i386__) || defined(__x86_64__) 638#if defined(__i386__) || defined(__x86_64__)
671/* 639/*
672 * FIXME: Make this available to all arches. 640 * FIXME: Make this available to all arches.
@@ -680,41 +648,28 @@ static struct syscall_fmt {
680 { .name = "keyctl", .errmsg = true, STRARRAY(0, option, keyctl_options), }, 648 { .name = "keyctl", .errmsg = true, STRARRAY(0, option, keyctl_options), },
681 { .name = "kill", .errmsg = true, 649 { .name = "kill", .errmsg = true,
682 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, 650 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
683 { .name = "lchown", .errmsg = true, 651 { .name = "lchown", .errmsg = true, },
684 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, }, 652 { .name = "lgetxattr", .errmsg = true, },
685 { .name = "lgetxattr", .errmsg = true,
686 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
687 { .name = "linkat", .errmsg = true, 653 { .name = "linkat", .errmsg = true,
688 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, }, 654 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
689 { .name = "listxattr", .errmsg = true, 655 { .name = "listxattr", .errmsg = true, },
690 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 656 { .name = "llistxattr", .errmsg = true, },
691 { .name = "llistxattr", .errmsg = true, 657 { .name = "lremovexattr", .errmsg = true, },
692 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
693 { .name = "lremovexattr", .errmsg = true,
694 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
695 { .name = "lseek", .errmsg = true, 658 { .name = "lseek", .errmsg = true,
696 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 659 .arg_scnprintf = { [2] = SCA_STRARRAY, /* whence */ },
697 [2] = SCA_STRARRAY, /* whence */ },
698 .arg_parm = { [2] = &strarray__whences, /* whence */ }, }, 660 .arg_parm = { [2] = &strarray__whences, /* whence */ }, },
699 { .name = "lsetxattr", .errmsg = true, 661 { .name = "lsetxattr", .errmsg = true, },
700 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 662 { .name = "lstat", .errmsg = true, .alias = "newlstat", },
701 { .name = "lstat", .errmsg = true, .alias = "newlstat", 663 { .name = "lsxattr", .errmsg = true, },
702 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
703 { .name = "lsxattr", .errmsg = true,
704 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
705 { .name = "madvise", .errmsg = true, 664 { .name = "madvise", .errmsg = true,
706 .arg_scnprintf = { [0] = SCA_HEX, /* start */ 665 .arg_scnprintf = { [0] = SCA_HEX, /* start */
707 [2] = SCA_MADV_BHV, /* behavior */ }, }, 666 [2] = SCA_MADV_BHV, /* behavior */ }, },
708 { .name = "mkdir", .errmsg = true, 667 { .name = "mkdir", .errmsg = true, },
709 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
710 { .name = "mkdirat", .errmsg = true, 668 { .name = "mkdirat", .errmsg = true,
711 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ 669 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
712 [1] = SCA_FILENAME, /* pathname */ }, }, 670 { .name = "mknod", .errmsg = true, },
713 { .name = "mknod", .errmsg = true,
714 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
715 { .name = "mknodat", .errmsg = true, 671 { .name = "mknodat", .errmsg = true,
716 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ 672 .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
717 [1] = SCA_FILENAME, /* filename */ }, },
718 { .name = "mlock", .errmsg = true, 673 { .name = "mlock", .errmsg = true,
719 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, 674 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
720 { .name = "mlockall", .errmsg = true, 675 { .name = "mlockall", .errmsg = true,
@@ -722,8 +677,7 @@ static struct syscall_fmt {
722 { .name = "mmap", .hexret = true, 677 { .name = "mmap", .hexret = true,
723 .arg_scnprintf = { [0] = SCA_HEX, /* addr */ 678 .arg_scnprintf = { [0] = SCA_HEX, /* addr */
724 [2] = SCA_MMAP_PROT, /* prot */ 679 [2] = SCA_MMAP_PROT, /* prot */
725 [3] = SCA_MMAP_FLAGS, /* flags */ 680 [3] = SCA_MMAP_FLAGS, /* flags */ }, },
726 [4] = SCA_FD, /* fd */ }, },
727 { .name = "mprotect", .errmsg = true, 681 { .name = "mprotect", .errmsg = true,
728 .arg_scnprintf = { [0] = SCA_HEX, /* start */ 682 .arg_scnprintf = { [0] = SCA_HEX, /* start */
729 [2] = SCA_MMAP_PROT, /* prot */ }, }, 683 [2] = SCA_MMAP_PROT, /* prot */ }, },
@@ -740,17 +694,14 @@ static struct syscall_fmt {
740 { .name = "name_to_handle_at", .errmsg = true, 694 { .name = "name_to_handle_at", .errmsg = true,
741 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 695 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
742 { .name = "newfstatat", .errmsg = true, 696 { .name = "newfstatat", .errmsg = true,
743 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 697 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
744 [1] = SCA_FILENAME, /* filename */ }, },
745 { .name = "open", .errmsg = true, 698 { .name = "open", .errmsg = true,
746 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ 699 .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
747 [1] = SCA_OPEN_FLAGS, /* flags */ }, },
748 { .name = "open_by_handle_at", .errmsg = true, 700 { .name = "open_by_handle_at", .errmsg = true,
749 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 701 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
750 [2] = SCA_OPEN_FLAGS, /* flags */ }, }, 702 [2] = SCA_OPEN_FLAGS, /* flags */ }, },
751 { .name = "openat", .errmsg = true, 703 { .name = "openat", .errmsg = true,
752 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 704 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
753 [1] = SCA_FILENAME, /* filename */
754 [2] = SCA_OPEN_FLAGS, /* flags */ }, }, 705 [2] = SCA_OPEN_FLAGS, /* flags */ }, },
755 { .name = "perf_event_open", .errmsg = true, 706 { .name = "perf_event_open", .errmsg = true,
756 .arg_scnprintf = { [2] = SCA_INT, /* cpu */ 707 .arg_scnprintf = { [2] = SCA_INT, /* cpu */
@@ -760,39 +711,26 @@ static struct syscall_fmt {
760 .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, }, 711 .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
761 { .name = "poll", .errmsg = true, .timeout = true, }, 712 { .name = "poll", .errmsg = true, .timeout = true, },
762 { .name = "ppoll", .errmsg = true, .timeout = true, }, 713 { .name = "ppoll", .errmsg = true, .timeout = true, },
763 { .name = "pread", .errmsg = true, .alias = "pread64", 714 { .name = "pread", .errmsg = true, .alias = "pread64", },
764 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 715 { .name = "preadv", .errmsg = true, .alias = "pread", },
765 { .name = "preadv", .errmsg = true, .alias = "pread",
766 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
767 { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), }, 716 { .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
768 { .name = "pwrite", .errmsg = true, .alias = "pwrite64", 717 { .name = "pwrite", .errmsg = true, .alias = "pwrite64", },
769 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 718 { .name = "pwritev", .errmsg = true, },
770 { .name = "pwritev", .errmsg = true, 719 { .name = "read", .errmsg = true, },
771 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 720 { .name = "readlink", .errmsg = true, },
772 { .name = "read", .errmsg = true,
773 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
774 { .name = "readlink", .errmsg = true,
775 .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
776 { .name = "readlinkat", .errmsg = true, 721 { .name = "readlinkat", .errmsg = true,
777 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 722 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
778 [1] = SCA_FILENAME, /* pathname */ }, }, 723 { .name = "readv", .errmsg = true, },
779 { .name = "readv", .errmsg = true,
780 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
781 { .name = "recvfrom", .errmsg = true, 724 { .name = "recvfrom", .errmsg = true,
782 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 725 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
783 [3] = SCA_MSG_FLAGS, /* flags */ }, },
784 { .name = "recvmmsg", .errmsg = true, 726 { .name = "recvmmsg", .errmsg = true,
785 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 727 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
786 [3] = SCA_MSG_FLAGS, /* flags */ }, },
787 { .name = "recvmsg", .errmsg = true, 728 { .name = "recvmsg", .errmsg = true,
788 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 729 .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
789 [2] = SCA_MSG_FLAGS, /* flags */ }, }, 730 { .name = "removexattr", .errmsg = true, },
790 { .name = "removexattr", .errmsg = true,
791 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
792 { .name = "renameat", .errmsg = true, 731 { .name = "renameat", .errmsg = true,
793 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, }, 732 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
794 { .name = "rmdir", .errmsg = true, 733 { .name = "rmdir", .errmsg = true, },
795 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
796 { .name = "rt_sigaction", .errmsg = true, 734 { .name = "rt_sigaction", .errmsg = true,
797 .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, }, 735 .arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
798 { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), }, 736 { .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
@@ -807,22 +745,17 @@ static struct syscall_fmt {
807 [1] = SCA_SECCOMP_FLAGS, /* flags */ }, }, 745 [1] = SCA_SECCOMP_FLAGS, /* flags */ }, },
808 { .name = "select", .errmsg = true, .timeout = true, }, 746 { .name = "select", .errmsg = true, .timeout = true, },
809 { .name = "sendmmsg", .errmsg = true, 747 { .name = "sendmmsg", .errmsg = true,
810 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 748 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
811 [3] = SCA_MSG_FLAGS, /* flags */ }, },
812 { .name = "sendmsg", .errmsg = true, 749 { .name = "sendmsg", .errmsg = true,
813 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 750 .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
814 [2] = SCA_MSG_FLAGS, /* flags */ }, },
815 { .name = "sendto", .errmsg = true, 751 { .name = "sendto", .errmsg = true,
816 .arg_scnprintf = { [0] = SCA_FD, /* fd */ 752 .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
817 [3] = SCA_MSG_FLAGS, /* flags */ }, },
818 { .name = "set_tid_address", .errpid = true, }, 753 { .name = "set_tid_address", .errpid = true, },
819 { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), }, 754 { .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
820 { .name = "setpgid", .errmsg = true, }, 755 { .name = "setpgid", .errmsg = true, },
821 { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), }, 756 { .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
822 { .name = "setxattr", .errmsg = true, 757 { .name = "setxattr", .errmsg = true, },
823 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 758 { .name = "shutdown", .errmsg = true, },
824 { .name = "shutdown", .errmsg = true,
825 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
826 { .name = "socket", .errmsg = true, 759 { .name = "socket", .errmsg = true,
827 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */ 760 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
828 [1] = SCA_SK_TYPE, /* type */ }, 761 [1] = SCA_SK_TYPE, /* type */ },
@@ -831,10 +764,8 @@ static struct syscall_fmt {
831 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */ 764 .arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
832 [1] = SCA_SK_TYPE, /* type */ }, 765 [1] = SCA_SK_TYPE, /* type */ },
833 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, }, 766 .arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
834 { .name = "stat", .errmsg = true, .alias = "newstat", 767 { .name = "stat", .errmsg = true, .alias = "newstat", },
835 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, }, 768 { .name = "statfs", .errmsg = true, },
836 { .name = "statfs", .errmsg = true,
837 .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
838 { .name = "swapoff", .errmsg = true, 769 { .name = "swapoff", .errmsg = true,
839 .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, }, 770 .arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
840 { .name = "swapon", .errmsg = true, 771 { .name = "swapon", .errmsg = true,
@@ -845,29 +776,21 @@ static struct syscall_fmt {
845 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, }, 776 .arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
846 { .name = "tkill", .errmsg = true, 777 { .name = "tkill", .errmsg = true,
847 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, }, 778 .arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
848 { .name = "truncate", .errmsg = true, 779 { .name = "truncate", .errmsg = true, },
849 .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
850 { .name = "uname", .errmsg = true, .alias = "newuname", }, 780 { .name = "uname", .errmsg = true, .alias = "newuname", },
851 { .name = "unlinkat", .errmsg = true, 781 { .name = "unlinkat", .errmsg = true,
852 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ 782 .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
853 [1] = SCA_FILENAME, /* pathname */ }, }, 783 { .name = "utime", .errmsg = true, },
854 { .name = "utime", .errmsg = true,
855 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
856 { .name = "utimensat", .errmsg = true, 784 { .name = "utimensat", .errmsg = true,
857 .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ 785 .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
858 [1] = SCA_FILENAME, /* filename */ }, }, 786 { .name = "utimes", .errmsg = true, },
859 { .name = "utimes", .errmsg = true, 787 { .name = "vmsplice", .errmsg = true, },
860 .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
861 { .name = "vmsplice", .errmsg = true,
862 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
863 { .name = "wait4", .errpid = true, 788 { .name = "wait4", .errpid = true,
864 .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, }, 789 .arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, },
865 { .name = "waitid", .errpid = true, 790 { .name = "waitid", .errpid = true,
866 .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, }, 791 .arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, },
867 { .name = "write", .errmsg = true, 792 { .name = "write", .errmsg = true, },
868 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, }, 793 { .name = "writev", .errmsg = true, },
869 { .name = "writev", .errmsg = true,
870 .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
871}; 794};
872 795
873static int syscall_fmt__cmp(const void *name, const void *fmtp) 796static int syscall_fmt__cmp(const void *name, const void *fmtp)
@@ -1160,6 +1083,24 @@ static int trace__tool_process(struct perf_tool *tool,
1160 return trace__process_event(trace, machine, event, sample); 1083 return trace__process_event(trace, machine, event, sample);
1161} 1084}
1162 1085
1086static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
1087{
1088 struct machine *machine = vmachine;
1089
1090 if (machine->kptr_restrict_warned)
1091 return NULL;
1092
1093 if (symbol_conf.kptr_restrict) {
1094 pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
1095 "Check /proc/sys/kernel/kptr_restrict.\n\n"
1096 "Kernel samples will not be resolved.\n");
1097 machine->kptr_restrict_warned = true;
1098 return NULL;
1099 }
1100
1101 return machine__resolve_kernel_addr(vmachine, addrp, modp);
1102}
1103
1163static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist) 1104static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1164{ 1105{
1165 int err = symbol__init(NULL); 1106 int err = symbol__init(NULL);
@@ -1171,7 +1112,7 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1171 if (trace->host == NULL) 1112 if (trace->host == NULL)
1172 return -ENOMEM; 1113 return -ENOMEM;
1173 1114
1174 if (trace_event__register_resolver(trace->host, machine__resolve_kernel_addr) < 0) 1115 if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0)
1175 return -errno; 1116 return -errno;
1176 1117
1177 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, 1118 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
@@ -1186,7 +1127,7 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
1186static int syscall__set_arg_fmts(struct syscall *sc) 1127static int syscall__set_arg_fmts(struct syscall *sc)
1187{ 1128{
1188 struct format_field *field; 1129 struct format_field *field;
1189 int idx = 0; 1130 int idx = 0, len;
1190 1131
1191 sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *)); 1132 sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
1192 if (sc->arg_scnprintf == NULL) 1133 if (sc->arg_scnprintf == NULL)
@@ -1198,12 +1139,31 @@ static int syscall__set_arg_fmts(struct syscall *sc)
1198 for (field = sc->args; field; field = field->next) { 1139 for (field = sc->args; field; field = field->next) {
1199 if (sc->fmt && sc->fmt->arg_scnprintf[idx]) 1140 if (sc->fmt && sc->fmt->arg_scnprintf[idx])
1200 sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx]; 1141 sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
1142 else if (strcmp(field->type, "const char *") == 0 &&
1143 (strcmp(field->name, "filename") == 0 ||
1144 strcmp(field->name, "path") == 0 ||
1145 strcmp(field->name, "pathname") == 0))
1146 sc->arg_scnprintf[idx] = SCA_FILENAME;
1201 else if (field->flags & FIELD_IS_POINTER) 1147 else if (field->flags & FIELD_IS_POINTER)
1202 sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex; 1148 sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
1203 else if (strcmp(field->type, "pid_t") == 0) 1149 else if (strcmp(field->type, "pid_t") == 0)
1204 sc->arg_scnprintf[idx] = SCA_PID; 1150 sc->arg_scnprintf[idx] = SCA_PID;
1205 else if (strcmp(field->type, "umode_t") == 0) 1151 else if (strcmp(field->type, "umode_t") == 0)
1206 sc->arg_scnprintf[idx] = SCA_MODE_T; 1152 sc->arg_scnprintf[idx] = SCA_MODE_T;
1153 else if ((strcmp(field->type, "int") == 0 ||
1154 strcmp(field->type, "unsigned int") == 0 ||
1155 strcmp(field->type, "long") == 0) &&
1156 (len = strlen(field->name)) >= 2 &&
1157 strcmp(field->name + len - 2, "fd") == 0) {
1158 /*
1159 * /sys/kernel/tracing/events/syscalls/sys_enter*
1160 * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
1161 * 65 int
1162 * 23 unsigned int
1163 * 7 unsigned long
1164 */
1165 sc->arg_scnprintf[idx] = SCA_FD;
1166 }
1207 ++idx; 1167 ++idx;
1208 } 1168 }
1209 1169
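The new branches in syscall__set_arg_fmts() are what allow the large table cleanup above: pointer arguments named filename/path/pathname get the filename formatter, and small integer arguments whose name ends in "fd" get the fd formatter, so per-syscall SCA_FILENAME/SCA_FD entries are no longer needed. A stand-alone sketch of that name/type heuristic, using plain strings instead of libtraceevent's struct format_field:

#include <string.h>

/* Hypothetical reduction of the heuristic above: classify one
 * syscall argument from its C type and field name. */
enum arg_kind { ARG_OTHER, ARG_FILENAME, ARG_FD };

static enum arg_kind classify_arg(const char *type, const char *name)
{
        size_t len = strlen(name);

        if (!strcmp(type, "const char *") &&
            (!strcmp(name, "filename") ||
             !strcmp(name, "path") ||
             !strcmp(name, "pathname")))
                return ARG_FILENAME;

        if ((!strcmp(type, "int") ||
             !strcmp(type, "unsigned int") ||
             !strcmp(type, "long")) &&
            len >= 2 && !strcmp(name + len - 2, "fd"))
                return ARG_FD;

        return ARG_OTHER;
}

/* classify_arg("const char *", "filename") == ARG_FILENAME
 * classify_arg("unsigned int", "fd")       == ARG_FD
 * classify_arg("unsigned int", "flags")    == ARG_OTHER   */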
@@ -1534,7 +1494,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
1534 if (sc->is_exit) { 1494 if (sc->is_exit) {
1535 if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) { 1495 if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
1536 trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output); 1496 trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
1537 fprintf(trace->output, "%-70s\n", ttrace->entry_str); 1497 fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
1538 } 1498 }
1539 } else { 1499 } else {
1540 ttrace->entry_pending = true; 1500 ttrace->entry_pending = true;
@@ -2887,12 +2847,12 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
2887 mmap_pages_user_set = false; 2847 mmap_pages_user_set = false;
2888 2848
2889 if (trace.max_stack == UINT_MAX) { 2849 if (trace.max_stack == UINT_MAX) {
2890 trace.max_stack = sysctl_perf_event_max_stack; 2850 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack;
2891 max_stack_user_set = false; 2851 max_stack_user_set = false;
2892 } 2852 }
2893 2853
2894#ifdef HAVE_DWARF_UNWIND_SUPPORT 2854#ifdef HAVE_DWARF_UNWIND_SUPPORT
2895 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) 2855 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled && trace.trace_syscalls)
2896 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false); 2856 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
2897#endif 2857#endif
2898 2858
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 797000842d40..15982cee5ef3 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -549,6 +549,9 @@ int main(int argc, const char **argv)
549 if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0) 549 if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
550 sysctl_perf_event_max_stack = value; 550 sysctl_perf_event_max_stack = value;
551 551
552 if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0)
553 sysctl_perf_event_max_contexts_per_stack = value;
554
552 cmd = extract_argv0_path(argv[0]); 555 cmd = extract_argv0_path(argv[0]);
553 if (!cmd) 556 if (!cmd)
554 cmd = "perf-help"; 557 cmd = "perf-help";
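perf caches both limits once at startup; sysctl__read_int() amounts to parsing a single integer out of the matching /proc/sys file. A rough stand-alone equivalent, with minimal error handling and a hypothetical helper name:

#include <stdio.h>

/* Rough equivalent of sysctl__read_int("kernel/perf_event_max_stack", &v):
 * read one integer from the corresponding /proc/sys file. */
static int read_sysctl_int(const char *name, int *value)
{
        char path[256];
        FILE *f;
        int ret = -1;

        snprintf(path, sizeof(path), "/proc/sys/%s", name);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%d", value) == 1)
                ret = 0;
        fclose(f);
        return ret;
}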
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 4db73d5a0dbc..7e5a1e8874ce 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -354,9 +354,6 @@ static struct ins_ops nop_ops = {
354 .scnprintf = nop__scnprintf, 354 .scnprintf = nop__scnprintf,
355}; 355};
356 356
357/*
358 * Must be sorted by name!
359 */
360static struct ins instructions[] = { 357static struct ins instructions[] = {
361 { .name = "add", .ops = &mov_ops, }, 358 { .name = "add", .ops = &mov_ops, },
362 { .name = "addl", .ops = &mov_ops, }, 359 { .name = "addl", .ops = &mov_ops, },
@@ -372,8 +369,8 @@ static struct ins instructions[] = {
372 { .name = "bgt", .ops = &jump_ops, }, 369 { .name = "bgt", .ops = &jump_ops, },
373 { .name = "bhi", .ops = &jump_ops, }, 370 { .name = "bhi", .ops = &jump_ops, },
374 { .name = "bl", .ops = &call_ops, }, 371 { .name = "bl", .ops = &call_ops, },
375 { .name = "blt", .ops = &jump_ops, },
376 { .name = "bls", .ops = &jump_ops, }, 372 { .name = "bls", .ops = &jump_ops, },
373 { .name = "blt", .ops = &jump_ops, },
377 { .name = "blx", .ops = &call_ops, }, 374 { .name = "blx", .ops = &call_ops, },
378 { .name = "bne", .ops = &jump_ops, }, 375 { .name = "bne", .ops = &jump_ops, },
379#endif 376#endif
@@ -449,18 +446,39 @@ static struct ins instructions[] = {
449 { .name = "xbeginq", .ops = &jump_ops, }, 446 { .name = "xbeginq", .ops = &jump_ops, },
450}; 447};
451 448
452static int ins__cmp(const void *name, const void *insp) 449static int ins__key_cmp(const void *name, const void *insp)
453{ 450{
454 const struct ins *ins = insp; 451 const struct ins *ins = insp;
455 452
456 return strcmp(name, ins->name); 453 return strcmp(name, ins->name);
457} 454}
458 455
456static int ins__cmp(const void *a, const void *b)
457{
458 const struct ins *ia = a;
459 const struct ins *ib = b;
460
461 return strcmp(ia->name, ib->name);
462}
463
464static void ins__sort(void)
465{
466 const int nmemb = ARRAY_SIZE(instructions);
467
468 qsort(instructions, nmemb, sizeof(struct ins), ins__cmp);
469}
470
459static struct ins *ins__find(const char *name) 471static struct ins *ins__find(const char *name)
460{ 472{
461 const int nmemb = ARRAY_SIZE(instructions); 473 const int nmemb = ARRAY_SIZE(instructions);
474 static bool sorted;
475
476 if (!sorted) {
477 ins__sort();
478 sorted = true;
479 }
462 480
463 return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp); 481 return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__key_cmp);
464} 482}
465 483
466int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym) 484int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym)
@@ -1122,7 +1140,7 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
1122 } else if (dso__is_kcore(dso)) { 1140 } else if (dso__is_kcore(dso)) {
1123 goto fallback; 1141 goto fallback;
1124 } else if (readlink(symfs_filename, command, sizeof(command)) < 0 || 1142 } else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
1125 strstr(command, "[kernel.kallsyms]") || 1143 strstr(command, DSO__NAME_KALLSYMS) ||
1126 access(symfs_filename, R_OK)) { 1144 access(symfs_filename, R_OK)) {
1127 free(filename); 1145 free(filename);
1128fallback: 1146fallback:
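The annotate change above drops the requirement that instructions[] be kept sorted by hand: the first ins__find() call sorts the table with qsort(), and later lookups use bsearch(), which needs the separate key-style comparator because it compares a bare name against a struct ins. The same lazy sort-then-search pattern in isolation:

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

struct entry {
        const char *name;
        int value;
};

static struct entry table[] = {
        { "mov", 1 }, { "add", 2 }, { "jmp", 3 }, { "call", 4 },
};

/* element-vs-element comparator for qsort() */
static int entry__cmp(const void *a, const void *b)
{
        const struct entry *ea = a, *eb = b;

        return strcmp(ea->name, eb->name);
}

/* key-vs-element comparator for bsearch(): 'key' is a plain string */
static int entry__key_cmp(const void *key, const void *entry)
{
        return strcmp(key, ((const struct entry *)entry)->name);
}

static struct entry *entry__find(const char *name)
{
        static bool sorted;
        const size_t nmemb = sizeof(table) / sizeof(table[0]);

        if (!sorted) {
                qsort(table, nmemb, sizeof(table[0]), entry__cmp);
                sorted = true;
        }

        return bsearch(name, table, nmemb, sizeof(table[0]), entry__key_cmp);
}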
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index bff425e1232c..67e5966503b2 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -256,7 +256,7 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
256 size_t name_len; 256 size_t name_len;
257 bool in_kernel = false; 257 bool in_kernel = false;
258 258
259 if (!pos->hit) 259 if (!pos->hit && !dso__is_vdso(pos))
260 continue; 260 continue;
261 261
262 if (dso__is_vdso(pos)) { 262 if (dso__is_vdso(pos)) {
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 8d96c80cc67e..c9a6dc173e74 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -298,8 +298,7 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
298 */ 298 */
299 callchain_param.order = ORDER_CALLER; 299 callchain_param.order = ORDER_CALLER;
300 err = thread__resolve_callchain(thread, &callchain_cursor, evsel, 300 err = thread__resolve_callchain(thread, &callchain_cursor, evsel,
301 sample, NULL, NULL, 301 sample, NULL, NULL, PERF_MAX_STACK_DEPTH);
302 sysctl_perf_event_max_stack);
303 if (err) { 302 if (err) {
304 callchain_param.order = saved_order; 303 callchain_param.order = saved_order;
305 return NULL; 304 return NULL;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 3357479082ca..5d286f5d7906 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -7,6 +7,7 @@
7#include "auxtrace.h" 7#include "auxtrace.h"
8#include "util.h" 8#include "util.h"
9#include "debug.h" 9#include "debug.h"
10#include "vdso.h"
10 11
11char dso__symtab_origin(const struct dso *dso) 12char dso__symtab_origin(const struct dso *dso)
12{ 13{
@@ -62,9 +63,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
62 } 63 }
63 break; 64 break;
64 case DSO_BINARY_TYPE__BUILD_ID_CACHE: 65 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
65 /* skip the locally configured cache if a symfs is given */ 66 if (dso__build_id_filename(dso, filename, size) == NULL)
66 if (symbol_conf.symfs[0] ||
67 (dso__build_id_filename(dso, filename, size) == NULL))
68 ret = -1; 67 ret = -1;
69 break; 68 break;
70 69
@@ -1169,7 +1168,7 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
1169 struct dso *pos; 1168 struct dso *pos;
1170 1169
1171 list_for_each_entry(pos, head, node) { 1170 list_for_each_entry(pos, head, node) {
1172 if (with_hits && !pos->hit) 1171 if (with_hits && !pos->hit && !dso__is_vdso(pos))
1173 continue; 1172 continue;
1174 if (pos->has_build_id) { 1173 if (pos->has_build_id) {
1175 have_build_id = true; 1174 have_build_id = true;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c4bfe11479a0..e82ba90cc969 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -44,6 +44,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
44 perf_evlist__set_maps(evlist, cpus, threads); 44 perf_evlist__set_maps(evlist, cpus, threads);
45 fdarray__init(&evlist->pollfd, 64); 45 fdarray__init(&evlist->pollfd, 64);
46 evlist->workload.pid = -1; 46 evlist->workload.pid = -1;
47 evlist->backward = false;
47} 48}
48 49
49struct perf_evlist *perf_evlist__new(void) 50struct perf_evlist *perf_evlist__new(void)
@@ -679,6 +680,33 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
679 return NULL; 680 return NULL;
680} 681}
681 682
683static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
684{
685 int i;
686
687 for (i = 0; i < evlist->nr_mmaps; i++) {
688 int fd = evlist->mmap[i].fd;
689 int err;
690
691 if (fd < 0)
692 continue;
693 err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
694 if (err)
695 return err;
696 }
697 return 0;
698}
699
700int perf_evlist__pause(struct perf_evlist *evlist)
701{
702 return perf_evlist__set_paused(evlist, true);
703}
704
705int perf_evlist__resume(struct perf_evlist *evlist)
706{
707 return perf_evlist__set_paused(evlist, false);
708}
709
682/* When check_messup is true, 'end' must points to a good entry */ 710/* When check_messup is true, 'end' must points to a good entry */
683static union perf_event * 711static union perf_event *
684perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start, 712perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
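perf_evlist__pause()/resume() added above drive the new PERF_EVENT_IOC_PAUSE_OUTPUT ioctl on every mmap'ed event fd, which is what makes a backward-writing ring buffer safe to read without racing the kernel. A minimal sketch of toggling it on a single already-opened perf event fd; the evlist bookkeeping and error handling around it are omitted:

#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Pause or resume ring-buffer output for one perf event fd.
 * Requires a kernel that defines PERF_EVENT_IOC_PAUSE_OUTPUT. */
static int event_fd__set_paused(int fd, bool paused)
{
        return ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, paused ? 1 : 0);
}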
@@ -881,6 +909,7 @@ static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
881 if (evlist->mmap[idx].base != NULL) { 909 if (evlist->mmap[idx].base != NULL) {
882 munmap(evlist->mmap[idx].base, evlist->mmap_len); 910 munmap(evlist->mmap[idx].base, evlist->mmap_len);
883 evlist->mmap[idx].base = NULL; 911 evlist->mmap[idx].base = NULL;
912 evlist->mmap[idx].fd = -1;
884 atomic_set(&evlist->mmap[idx].refcnt, 0); 913 atomic_set(&evlist->mmap[idx].refcnt, 0);
885 } 914 }
886 auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap); 915 auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
@@ -901,10 +930,14 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
901 930
902static int perf_evlist__alloc_mmap(struct perf_evlist *evlist) 931static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
903{ 932{
933 int i;
934
904 evlist->nr_mmaps = cpu_map__nr(evlist->cpus); 935 evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
905 if (cpu_map__empty(evlist->cpus)) 936 if (cpu_map__empty(evlist->cpus))
906 evlist->nr_mmaps = thread_map__nr(evlist->threads); 937 evlist->nr_mmaps = thread_map__nr(evlist->threads);
907 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); 938 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
939 for (i = 0; i < evlist->nr_mmaps; i++)
940 evlist->mmap[i].fd = -1;
908 return evlist->mmap != NULL ? 0 : -ENOMEM; 941 return evlist->mmap != NULL ? 0 : -ENOMEM;
909} 942}
910 943
@@ -941,6 +974,7 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
941 evlist->mmap[idx].base = NULL; 974 evlist->mmap[idx].base = NULL;
942 return -1; 975 return -1;
943 } 976 }
977 evlist->mmap[idx].fd = fd;
944 978
945 if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap, 979 if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
946 &mp->auxtrace_mp, evlist->mmap[idx].base, fd)) 980 &mp->auxtrace_mp, evlist->mmap[idx].base, fd))
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 85d1b59802e8..d740fb877ab6 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -28,6 +28,7 @@ struct record_opts;
28struct perf_mmap { 28struct perf_mmap {
29 void *base; 29 void *base;
30 int mask; 30 int mask;
31 int fd;
31 atomic_t refcnt; 32 atomic_t refcnt;
32 u64 prev; 33 u64 prev;
33 struct auxtrace_mmap auxtrace_mmap; 34 struct auxtrace_mmap auxtrace_mmap;
@@ -43,6 +44,7 @@ struct perf_evlist {
43 bool overwrite; 44 bool overwrite;
44 bool enabled; 45 bool enabled;
45 bool has_user_cpus; 46 bool has_user_cpus;
47 bool backward;
46 size_t mmap_len; 48 size_t mmap_len;
47 int id_pos; 49 int id_pos;
48 int is_pos; 50 int is_pos;
@@ -135,6 +137,8 @@ void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
135 137
136void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx); 138void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
137 139
140int perf_evlist__pause(struct perf_evlist *evlist);
141int perf_evlist__resume(struct perf_evlist *evlist);
138int perf_evlist__open(struct perf_evlist *evlist); 142int perf_evlist__open(struct perf_evlist *evlist);
139void perf_evlist__close(struct perf_evlist *evlist); 143void perf_evlist__close(struct perf_evlist *evlist);
140 144
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 52c7d8884741..5d7037ef7d3b 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -37,6 +37,7 @@ static struct {
37 bool clockid; 37 bool clockid;
38 bool clockid_wrong; 38 bool clockid_wrong;
39 bool lbr_flags; 39 bool lbr_flags;
40 bool write_backward;
40} perf_missing_features; 41} perf_missing_features;
41 42
42static clockid_t clockid; 43static clockid_t clockid;
@@ -1376,6 +1377,8 @@ fallback_missing_features:
1376 if (perf_missing_features.lbr_flags) 1377 if (perf_missing_features.lbr_flags)
1377 evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS | 1378 evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
1378 PERF_SAMPLE_BRANCH_NO_CYCLES); 1379 PERF_SAMPLE_BRANCH_NO_CYCLES);
1380 if (perf_missing_features.write_backward)
1381 evsel->attr.write_backward = false;
1379retry_sample_id: 1382retry_sample_id:
1380 if (perf_missing_features.sample_id_all) 1383 if (perf_missing_features.sample_id_all)
1381 evsel->attr.sample_id_all = 0; 1384 evsel->attr.sample_id_all = 0;
@@ -1438,6 +1441,12 @@ retry_open:
1438 err = -EINVAL; 1441 err = -EINVAL;
1439 goto out_close; 1442 goto out_close;
1440 } 1443 }
1444
1445 if (evsel->overwrite &&
1446 perf_missing_features.write_backward) {
1447 err = -EINVAL;
1448 goto out_close;
1449 }
1441 } 1450 }
1442 } 1451 }
1443 1452
@@ -1500,6 +1509,10 @@ try_fallback:
1500 PERF_SAMPLE_BRANCH_NO_FLAGS))) { 1509 PERF_SAMPLE_BRANCH_NO_FLAGS))) {
1501 perf_missing_features.lbr_flags = true; 1510 perf_missing_features.lbr_flags = true;
1502 goto fallback_missing_features; 1511 goto fallback_missing_features;
1512 } else if (!perf_missing_features.write_backward &&
1513 evsel->attr.write_backward) {
1514 perf_missing_features.write_backward = true;
1515 goto fallback_missing_features;
1503 } 1516 }
1504 1517
1505out_close: 1518out_close:
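The evsel changes follow perf's usual feature-probing pattern: try the event with attr.write_backward set, and if the running kernel rejects it, record the missing feature and retry without it. Stripped of the evsel plumbing, the probe loop looks roughly like this (hypothetical helper, not the real perf_evsel__open()):

#include <stdbool.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Hypothetical probe: open an event, falling back when the kernel
 * does not know about attr.write_backward (pre-4.7 kernels). */
static int open_event_probing_backward(struct perf_event_attr *attr,
                                       pid_t pid, int cpu)
{
        static bool missing_write_backward;
        int fd;

retry:
        if (missing_write_backward)
                attr->write_backward = 0;

        fd = syscall(__NR_perf_event_open, attr, pid, cpu, -1, 0);
        if (fd < 0 && !missing_write_backward && attr->write_backward) {
                missing_write_backward = true;
                goto retry;
        }
        return fd;
}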
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 8a644fef452c..c1f10159804c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -112,6 +112,7 @@ struct perf_evsel {
112 bool tracking; 112 bool tracking;
113 bool per_pkg; 113 bool per_pkg;
114 bool precise_max; 114 bool precise_max;
115 bool overwrite;
115 /* parse modifier helper */ 116 /* parse modifier helper */
116 int exclude_GH; 117 int exclude_GH;
117 int nr_members; 118 int nr_members;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index cfab531437c7..d1f19e0012d4 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -117,6 +117,13 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
117 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); 117 hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
118 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); 118 hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
119 } 119 }
120
121 if (h->branch_info->srcline_from)
122 hists__new_col_len(hists, HISTC_SRCLINE_FROM,
123 strlen(h->branch_info->srcline_from));
124 if (h->branch_info->srcline_to)
125 hists__new_col_len(hists, HISTC_SRCLINE_TO,
126 strlen(h->branch_info->srcline_to));
120 } 127 }
121 128
122 if (h->mem_info) { 129 if (h->mem_info) {
@@ -1042,6 +1049,8 @@ void hist_entry__delete(struct hist_entry *he)
1042 if (he->branch_info) { 1049 if (he->branch_info) {
1043 map__zput(he->branch_info->from.map); 1050 map__zput(he->branch_info->from.map);
1044 map__zput(he->branch_info->to.map); 1051 map__zput(he->branch_info->to.map);
1052 free_srcline(he->branch_info->srcline_from);
1053 free_srcline(he->branch_info->srcline_to);
1045 zfree(&he->branch_info); 1054 zfree(&he->branch_info);
1046 } 1055 }
1047 1056
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 0f84bfb42bb1..7b54ccf1b737 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -52,6 +52,8 @@ enum hist_column {
52 HISTC_MEM_IADDR_SYMBOL, 52 HISTC_MEM_IADDR_SYMBOL,
53 HISTC_TRANSACTION, 53 HISTC_TRANSACTION,
54 HISTC_CYCLES, 54 HISTC_CYCLES,
55 HISTC_SRCLINE_FROM,
56 HISTC_SRCLINE_TO,
55 HISTC_TRACE, 57 HISTC_TRACE,
56 HISTC_NR_COLS, /* Last entry */ 58 HISTC_NR_COLS, /* Last entry */
57}; 59};
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index f9644f79686c..b1772180c820 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -43,6 +43,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
43 43
44 machine->symbol_filter = NULL; 44 machine->symbol_filter = NULL;
45 machine->id_hdr_size = 0; 45 machine->id_hdr_size = 0;
46 machine->kptr_restrict_warned = false;
46 machine->comm_exec = false; 47 machine->comm_exec = false;
47 machine->kernel_start = 0; 48 machine->kernel_start = 0;
48 49
@@ -709,7 +710,7 @@ static struct dso *machine__get_kernel(struct machine *machine)
709 if (machine__is_host(machine)) { 710 if (machine__is_host(machine)) {
710 vmlinux_name = symbol_conf.vmlinux_name; 711 vmlinux_name = symbol_conf.vmlinux_name;
711 if (!vmlinux_name) 712 if (!vmlinux_name)
712 vmlinux_name = "[kernel.kallsyms]"; 713 vmlinux_name = DSO__NAME_KALLSYMS;
713 714
714 kernel = machine__findnew_kernel(machine, vmlinux_name, 715 kernel = machine__findnew_kernel(machine, vmlinux_name,
715 "[kernel]", DSO_TYPE_KERNEL); 716 "[kernel]", DSO_TYPE_KERNEL);
@@ -1135,10 +1136,10 @@ int machine__create_kernel_maps(struct machine *machine)
1135{ 1136{
1136 struct dso *kernel = machine__get_kernel(machine); 1137 struct dso *kernel = machine__get_kernel(machine);
1137 const char *name; 1138 const char *name;
1138 u64 addr = machine__get_running_kernel_start(machine, &name); 1139 u64 addr;
1139 int ret; 1140 int ret;
1140 1141
1141 if (!addr || kernel == NULL) 1142 if (kernel == NULL)
1142 return -1; 1143 return -1;
1143 1144
1144 ret = __machine__create_kernel_maps(machine, kernel); 1145 ret = __machine__create_kernel_maps(machine, kernel);
@@ -1160,8 +1161,9 @@ int machine__create_kernel_maps(struct machine *machine)
1160 */ 1161 */
1161 map_groups__fixup_end(&machine->kmaps); 1162 map_groups__fixup_end(&machine->kmaps);
1162 1163
1163 if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, 1164 addr = machine__get_running_kernel_start(machine, &name);
1164 addr)) { 1165 if (!addr) {
1166 } else if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
1165 machine__destroy_kernel_maps(machine); 1167 machine__destroy_kernel_maps(machine);
1166 return -1; 1168 return -1;
1167 } 1169 }
@@ -1769,11 +1771,6 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
1769 */ 1771 */
1770 int mix_chain_nr = i + 1 + lbr_nr + 1; 1772 int mix_chain_nr = i + 1 + lbr_nr + 1;
1771 1773
1772 if (mix_chain_nr > (int)sysctl_perf_event_max_stack + PERF_MAX_BRANCH_DEPTH) {
1773 pr_warning("corrupted callchain. skipping...\n");
1774 return 0;
1775 }
1776
1777 for (j = 0; j < mix_chain_nr; j++) { 1774 for (j = 0; j < mix_chain_nr; j++) {
1778 if (callchain_param.order == ORDER_CALLEE) { 1775 if (callchain_param.order == ORDER_CALLEE) {
1779 if (j < i + 1) 1776 if (j < i + 1)
@@ -1811,9 +1808,9 @@ static int thread__resolve_callchain_sample(struct thread *thread,
1811{ 1808{
1812 struct branch_stack *branch = sample->branch_stack; 1809 struct branch_stack *branch = sample->branch_stack;
1813 struct ip_callchain *chain = sample->callchain; 1810 struct ip_callchain *chain = sample->callchain;
1814 int chain_nr = min(max_stack, (int)chain->nr); 1811 int chain_nr = chain->nr;
1815 u8 cpumode = PERF_RECORD_MISC_USER; 1812 u8 cpumode = PERF_RECORD_MISC_USER;
1816 int i, j, err; 1813 int i, j, err, nr_entries;
1817 int skip_idx = -1; 1814 int skip_idx = -1;
1818 int first_call = 0; 1815 int first_call = 0;
1819 1816
@@ -1828,8 +1825,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
1828 * Based on DWARF debug information, some architectures skip 1825 * Based on DWARF debug information, some architectures skip
1829 * a callchain entry saved by the kernel. 1826 * a callchain entry saved by the kernel.
1830 */ 1827 */
1831 if (chain->nr < sysctl_perf_event_max_stack) 1828 skip_idx = arch_skip_callchain_idx(thread, chain);
1832 skip_idx = arch_skip_callchain_idx(thread, chain);
1833 1829
1834 /* 1830 /*
1835 * Add branches to call stack for easier browsing. This gives 1831 * Add branches to call stack for easier browsing. This gives
@@ -1889,12 +1885,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
1889 } 1885 }
1890 1886
1891check_calls: 1887check_calls:
1892 if (chain->nr > sysctl_perf_event_max_stack && (int)chain->nr > max_stack) { 1888 for (i = first_call, nr_entries = 0;
1893 pr_warning("corrupted callchain. skipping...\n"); 1889 i < chain_nr && nr_entries < max_stack; i++) {
1894 return 0;
1895 }
1896
1897 for (i = first_call; i < chain_nr; i++) {
1898 u64 ip; 1890 u64 ip;
1899 1891
1900 if (callchain_param.order == ORDER_CALLEE) 1892 if (callchain_param.order == ORDER_CALLEE)
@@ -1908,6 +1900,9 @@ check_calls:
1908#endif 1900#endif
1909 ip = chain->ips[j]; 1901 ip = chain->ips[j];
1910 1902
1903 if (ip < PERF_CONTEXT_MAX)
1904 ++nr_entries;
1905
1911 err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip); 1906 err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
1912 1907
1913 if (err) 1908 if (err)
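The rewritten check_calls loop above replaces the old "corrupted callchain" bail-out: rather than rejecting long chains, it walks them and stops once max_stack real entries have been added, where only plain addresses count (context markers such as PERF_CONTEXT_KERNEL are encoded as values >= PERF_CONTEXT_MAX). The counting rule in isolation, with a hypothetical callback standing in for add_callchain_ip():

#include <stdint.h>
#include <linux/perf_event.h>

/* Walk a raw callchain, invoking 'add' for every entry but counting
 * only real addresses against max_stack; context markers (values
 * >= PERF_CONTEXT_MAX) pass through without being counted.
 * 'add' is a hypothetical stand-in for add_callchain_ip(). */
static int walk_callchain(const uint64_t *ips, uint64_t nr, int max_stack,
                          int (*add)(uint64_t ip, void *priv), void *priv)
{
        int nr_entries = 0;
        uint64_t i;

        for (i = 0; i < nr && nr_entries < max_stack; i++) {
                if (ips[i] < PERF_CONTEXT_MAX)
                        ++nr_entries;
                if (add(ips[i], priv))
                        return -1;
        }
        return 0;
}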
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 83f46790c52f..41ac9cfd416b 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -28,6 +28,7 @@ struct machine {
28 pid_t pid; 28 pid_t pid;
29 u16 id_hdr_size; 29 u16 id_hdr_size;
30 bool comm_exec; 30 bool comm_exec;
31 bool kptr_restrict_warned;
31 char *root_dir; 32 char *root_dir;
32 struct rb_root threads; 33 struct rb_root threads;
33 pthread_rwlock_t threads_lock; 34 pthread_rwlock_t threads_lock;
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 62c7f6988e0e..5d1eb1ccd96c 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -264,8 +264,7 @@ static SV *perl_process_callchain(struct perf_sample *sample,
264 goto exit; 264 goto exit;
265 265
266 if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel, 266 if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
267 sample, NULL, NULL, 267 sample, NULL, NULL, scripting_max_stack) != 0) {
268 sysctl_perf_event_max_stack) != 0) {
269 pr_err("Failed to resolve callchain. Skipping\n"); 268 pr_err("Failed to resolve callchain. Skipping\n");
270 goto exit; 269 goto exit;
271 } 270 }
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 20e69edd5006..c4e9bd70723c 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -353,6 +353,88 @@ struct sort_entry sort_srcline = {
353 .se_width_idx = HISTC_SRCLINE, 353 .se_width_idx = HISTC_SRCLINE,
354}; 354};
355 355
356/* --sort srcline_from */
357
358static int64_t
359sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
360{
361 if (!left->branch_info->srcline_from) {
362 struct map *map = left->branch_info->from.map;
363 if (!map)
364 left->branch_info->srcline_from = SRCLINE_UNKNOWN;
365 else
366 left->branch_info->srcline_from = get_srcline(map->dso,
367 map__rip_2objdump(map,
368 left->branch_info->from.al_addr),
369 left->branch_info->from.sym, true);
370 }
371 if (!right->branch_info->srcline_from) {
372 struct map *map = right->branch_info->from.map;
373 if (!map)
374 right->branch_info->srcline_from = SRCLINE_UNKNOWN;
375 else
376 right->branch_info->srcline_from = get_srcline(map->dso,
377 map__rip_2objdump(map,
378 right->branch_info->from.al_addr),
379 right->branch_info->from.sym, true);
380 }
381 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
382}
383
384static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
385 size_t size, unsigned int width)
386{
387 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
388}
389
390struct sort_entry sort_srcline_from = {
391 .se_header = "From Source:Line",
392 .se_cmp = sort__srcline_from_cmp,
393 .se_snprintf = hist_entry__srcline_from_snprintf,
394 .se_width_idx = HISTC_SRCLINE_FROM,
395};
396
397/* --sort srcline_to */
398
399static int64_t
400sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
401{
402 if (!left->branch_info->srcline_to) {
403 struct map *map = left->branch_info->to.map;
404 if (!map)
405 left->branch_info->srcline_to = SRCLINE_UNKNOWN;
406 else
407 left->branch_info->srcline_to = get_srcline(map->dso,
408 map__rip_2objdump(map,
409 left->branch_info->to.al_addr),
 410 left->branch_info->to.sym, true);
411 }
412 if (!right->branch_info->srcline_to) {
413 struct map *map = right->branch_info->to.map;
414 if (!map)
415 right->branch_info->srcline_to = SRCLINE_UNKNOWN;
416 else
417 right->branch_info->srcline_to = get_srcline(map->dso,
418 map__rip_2objdump(map,
419 right->branch_info->to.al_addr),
420 right->branch_info->to.sym, true);
421 }
422 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
423}
424
425static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
426 size_t size, unsigned int width)
427{
428 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
429}
430
431struct sort_entry sort_srcline_to = {
432 .se_header = "To Source:Line",
433 .se_cmp = sort__srcline_to_cmp,
434 .se_snprintf = hist_entry__srcline_to_snprintf,
435 .se_width_idx = HISTC_SRCLINE_TO,
436};
437
356/* --sort srcfile */ 438/* --sort srcfile */
357 439
358static char no_srcfile[1]; 440static char no_srcfile[1];
@@ -1347,6 +1429,8 @@ static struct sort_dimension bstack_sort_dimensions[] = {
1347 DIM(SORT_IN_TX, "in_tx", sort_in_tx), 1429 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
1348 DIM(SORT_ABORT, "abort", sort_abort), 1430 DIM(SORT_ABORT, "abort", sort_abort),
1349 DIM(SORT_CYCLES, "cycles", sort_cycles), 1431 DIM(SORT_CYCLES, "cycles", sort_cycles),
1432 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
1433 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
1350}; 1434};
1351 1435
1352#undef DIM 1436#undef DIM
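
The two comparators above resolve the branch source and target lines lazily and cache the strings in branch_info, so the expensive get_srcline() lookup runs at most once per hist entry even though comparisons happen many times during sorting; the keys then become selectable as --sort srcline_from and --sort srcline_to. A hedged sketch of the same caching idiom, using made-up types rather than the perf structs:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct entry {
            unsigned long addr;
            char *srcline;          /* NULL until first comparison */
    };

    /* Stand-in for get_srcline(): resolve once, reuse on every later compare. */
    static const char *resolve_srcline(struct entry *e)
    {
            if (!e->srcline) {
                    e->srcline = malloc(32);
                    if (e->srcline)
                            snprintf(e->srcline, 32, "file.c:%lu", e->addr);
            }
            return e->srcline ? e->srcline : "??:0";
    }

    static int entry_cmp(struct entry *left, struct entry *right)
    {
            /* same ordering convention as the comparators above */
            return strcmp(resolve_srcline(right), resolve_srcline(left));
    }

    int main(void)
    {
            struct entry a = { .addr = 0x400100 }, b = { .addr = 0x400200 };

            printf("cmp = %d\n", entry_cmp(&a, &b));
            free(a.srcline);
            free(b.srcline);
            return 0;
    }
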
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 42927f448bcb..ebb59cacd092 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -215,6 +215,8 @@ enum sort_type {
215 SORT_ABORT, 215 SORT_ABORT,
216 SORT_IN_TX, 216 SORT_IN_TX,
217 SORT_CYCLES, 217 SORT_CYCLES,
218 SORT_SRCLINE_FROM,
219 SORT_SRCLINE_TO,
218 220
219 /* memory mode specific sort keys */ 221 /* memory mode specific sort keys */
220 __SORT_MEMORY_MODE, 222 __SORT_MEMORY_MODE,
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index fdb71961143e..aa9efe08762b 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -94,7 +94,8 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
94{ 94{
95 int ctx = evsel_context(counter); 95 int ctx = evsel_context(counter);
96 96
97 if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK)) 97 if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
98 perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
98 update_stats(&runtime_nsecs_stats[cpu], count[0]); 99 update_stats(&runtime_nsecs_stats[cpu], count[0]);
99 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) 100 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
100 update_stats(&runtime_cycles_stats[ctx][cpu], count[0]); 101 update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
@@ -188,7 +189,7 @@ static void print_stalled_cycles_backend(int cpu,
188 189
189 color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio); 190 color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
190 191
191 out->print_metric(out->ctx, color, "%6.2f%%", "backend cycles idle", ratio); 192 out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
192} 193}
193 194
194static void print_branch_misses(int cpu, 195static void print_branch_misses(int cpu,
@@ -444,7 +445,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
444 ratio = total / avg; 445 ratio = total / avg;
445 446
446 print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio); 447 print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
447 } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) { 448 } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
449 perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
448 if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0) 450 if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
449 print_metric(ctxp, NULL, "%8.3f", "CPUs utilized", 451 print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
450 avg / ratio); 452 avg / ratio);
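
Treating SW_CPU_CLOCK like SW_TASK_CLOCK means a cpu-clock event now also feeds runtime_nsecs_stats and gets the "CPUs utilized" shadow metric, which is simply clock-event nanoseconds divided by wall-clock nanoseconds. A small arithmetic sketch with illustrative names:

    #include <stdio.h>

    /* CPUs utilized = nanoseconds counted by the (task- or cpu-)clock event
     * divided by elapsed wall-clock nanoseconds. */
    static double cpus_utilized(double clock_nsecs, double wall_nsecs)
    {
            return wall_nsecs != 0.0 ? clock_nsecs / wall_nsecs : 0.0;
    }

    int main(void)
    {
            /* e.g. 4e9 ns of CPU time over 1e9 ns of wall time -> 4.000 */
            printf("%8.3f CPUs utilized\n", cpus_utilized(4e9, 1e9));
            return 0;
    }
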
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 7fb33304fb4e..20f9cb32b703 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1662,8 +1662,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
1662 1662
1663 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); 1663 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1664 1664
1665 scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir, 1665 scnprintf(path, sizeof(path), "%s/%s/%s", buildid_dir,
1666 sbuild_id); 1666 DSO__NAME_KCORE, sbuild_id);
1667 1667
1668 /* Use /proc/kallsyms if possible */ 1668 /* Use /proc/kallsyms if possible */
1669 if (is_host) { 1669 if (is_host) {
@@ -1699,8 +1699,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
1699 if (!find_matching_kcore(map, path, sizeof(path))) 1699 if (!find_matching_kcore(map, path, sizeof(path)))
1700 return strdup(path); 1700 return strdup(path);
1701 1701
1702 scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s", 1702 scnprintf(path, sizeof(path), "%s/%s/%s",
1703 buildid_dir, sbuild_id); 1703 buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
1704 1704
1705 if (access(path, F_OK)) { 1705 if (access(path, F_OK)) {
1706 pr_err("No kallsyms or vmlinux with build-id %s was found\n", 1706 pr_err("No kallsyms or vmlinux with build-id %s was found\n",
@@ -1769,7 +1769,7 @@ do_kallsyms:
1769 1769
1770 if (err > 0 && !dso__is_kcore(dso)) { 1770 if (err > 0 && !dso__is_kcore(dso)) {
1771 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS; 1771 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
1772 dso__set_long_name(dso, "[kernel.kallsyms]", false); 1772 dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
1773 map__fixup_start(map); 1773 map__fixup_start(map);
1774 map__fixup_end(map); 1774 map__fixup_end(map);
1775 } 1775 }
@@ -2033,3 +2033,26 @@ void symbol__exit(void)
2033 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; 2033 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2034 symbol_conf.initialized = false; 2034 symbol_conf.initialized = false;
2035} 2035}
2036
2037int symbol__config_symfs(const struct option *opt __maybe_unused,
2038 const char *dir, int unset __maybe_unused)
2039{
2040 char *bf = NULL;
2041 int ret;
2042
2043 symbol_conf.symfs = strdup(dir);
2044 if (symbol_conf.symfs == NULL)
2045 return -ENOMEM;
2046
2047 /* skip the locally configured cache if a symfs is given, and
2048 * config buildid dir to symfs/.debug
2049 */
2050 ret = asprintf(&bf, "%s/%s", dir, ".debug");
2051 if (ret < 0)
2052 return -ENOMEM;
2053
2054 set_buildid_dir(bf);
2055
2056 free(bf);
2057 return 0;
2058}
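
The new symbol__config_symfs() helper both records the --symfs sysroot and redirects the build-id cache to <symfs>/.debug, so cached objects are searched inside the sysroot rather than under the user's home directory. A standalone sketch of just the path handling, assuming GNU asprintf(); the helper name is illustrative:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    /* Build "<symfs>/.debug", the directory used as build-id cache when a
     * symfs sysroot is configured.  Caller frees the result. */
    static char *symfs_buildid_dir(const char *symfs)
    {
            char *bf = NULL;

            if (asprintf(&bf, "%s/%s", symfs, ".debug") < 0)
                    return NULL;    /* treated as -ENOMEM by the caller */
            return bf;
    }

    int main(void)
    {
            char *dir = symfs_buildid_dir("/srv/sysroot");

            if (dir) {
                    printf("build-id cache: %s\n", dir);
                    free(dir);
            }
            return 0;
    }
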
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 2b5e4ed76fcb..b10d558a8803 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -44,6 +44,9 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
44#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */ 44#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
45#endif 45#endif
46 46
47#define DSO__NAME_KALLSYMS "[kernel.kallsyms]"
48#define DSO__NAME_KCORE "[kernel.kcore]"
49
47/** struct symbol - symtab entry 50/** struct symbol - symtab entry
48 * 51 *
49 * @ignore - resolvable but tools ignore it (e.g. idle routines) 52 * @ignore - resolvable but tools ignore it (e.g. idle routines)
@@ -183,6 +186,8 @@ struct branch_info {
183 struct addr_map_symbol from; 186 struct addr_map_symbol from;
184 struct addr_map_symbol to; 187 struct addr_map_symbol to;
185 struct branch_flags flags; 188 struct branch_flags flags;
189 char *srcline_from;
190 char *srcline_to;
186}; 191};
187 192
188struct mem_info { 193struct mem_info {
@@ -287,6 +292,8 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type);
287bool symbol__restricted_filename(const char *filename, 292bool symbol__restricted_filename(const char *filename,
288 const char *restricted_filename); 293 const char *restricted_filename);
289bool symbol__is_idle(struct symbol *sym); 294bool symbol__is_idle(struct symbol *sym);
295int symbol__config_symfs(const struct option *opt __maybe_unused,
296 const char *dir, int unset __maybe_unused);
290 297
291int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, 298int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
292 struct symsrc *runtime_ss, symbol_filter_t filter, 299 struct symsrc *runtime_ss, symbol_filter_t filter,
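
DSO__NAME_KALLSYMS and DSO__NAME_KCORE replace string literals that were previously duplicated in symbol.c; they end up in build-id cache paths of the form <buildid_dir>/[kernel.kallsyms]/<sbuild_id>. A small sketch of that composition (the directory and build-id values are made up):

    #include <stdio.h>

    #define DSO__NAME_KALLSYMS "[kernel.kallsyms]"
    #define DSO__NAME_KCORE    "[kernel.kcore]"

    int main(void)
    {
            const char *buildid_dir = "/home/user/.debug";        /* illustrative */
            const char *sbuild_id = "0123456789abcdef0123456789abcdef01234567";
            char path[256];

            snprintf(path, sizeof(path), "%s/%s/%s",
                     buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
            printf("%s\n", path);
            return 0;
    }
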
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index f92c37abb0a8..b2940c88734a 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -27,7 +27,6 @@ struct perf_top {
27 int max_stack; 27 int max_stack;
28 bool hide_kernel_symbols, hide_user_symbols, zero; 28 bool hide_kernel_symbols, hide_user_symbols, zero;
29 bool use_tui, use_stdio; 29 bool use_tui, use_stdio;
30 bool kptr_restrict_warned;
31 bool vmlinux_warned; 30 bool vmlinux_warned;
32 bool dump_symtab; 31 bool dump_symtab;
33 struct hist_entry *sym_filter_entry; 32 struct hist_entry *sym_filter_entry;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index eab077ad6ca9..23504ad5d6dd 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -33,7 +33,8 @@ struct callchain_param callchain_param = {
33unsigned int page_size; 33unsigned int page_size;
34int cacheline_size; 34int cacheline_size;
35 35
36unsigned int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH; 36int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
37int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK;
37 38
38bool test_attr__enabled; 39bool test_attr__enabled;
39 40
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 7651633a8dc7..1e8c3167b9fb 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -261,7 +261,8 @@ void sighandler_dump_stack(int sig);
261 261
262extern unsigned int page_size; 262extern unsigned int page_size;
263extern int cacheline_size; 263extern int cacheline_size;
264extern unsigned int sysctl_perf_event_max_stack; 264extern int sysctl_perf_event_max_stack;
265extern int sysctl_perf_event_max_contexts_per_stack;
265 266
266struct parse_tag { 267struct parse_tag {
267 char tag; 268 char tag;
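
The tool-side defaults above mirror the kernel's PERF_MAX_STACK_DEPTH (127) and PERF_MAX_CONTEXTS_PER_STACK (8), which are exposed as the kernel.perf_event_max_stack and kernel.perf_event_max_contexts_per_stack sysctls. A hedged example of how a tool might read the current values back from /proc; the parsing helper is illustrative, not the perf code:

    #include <stdio.h>

    static int read_int_sysctl(const char *path, int fallback)
    {
            FILE *fp = fopen(path, "r");
            int val = fallback;

            if (fp) {
                    if (fscanf(fp, "%d", &val) != 1)
                            val = fallback;
                    fclose(fp);
            }
            return val;
    }

    int main(void)
    {
            int max_stack = read_int_sysctl("/proc/sys/kernel/perf_event_max_stack", 127);
            int max_ctx   = read_int_sysctl("/proc/sys/kernel/perf_event_max_contexts_per_stack", 8);

            printf("max stack frames: %d, max context entries: %d\n", max_stack, max_ctx);
            return 0;
    }
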