author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-25 20:05:40 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-25 20:05:40 -0400
commit     bdc6b758e443c21c39a14c075e5b7e01f095b37b (patch)
tree       40b98b5abd501cc232f41af03eb078282d7a6327 /arch
parent     c4a346002bc06046bc51910a7ade3a0c650c3d34 (diff)
parent     0c9f790fcbdaf8cfb6dd7fb4e88fadf55082e37e (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"Mostly tooling and PMU driver fixes, but also a number of late updates
such as the reworking of the call-chain size limiting logic to make
call-graph recording more robust, plus tooling side changes for the
new 'backwards ring-buffer' extension to the perf ring-buffer"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (34 commits)
perf record: Read from backward ring buffer
perf record: Rename variable to make code clear
perf record: Prevent reading invalid data in record__mmap_read
perf evlist: Add API to pause/resume
perf trace: Use the ptr->name beautifier as default for "filename" args
perf trace: Use the fd->name beautifier as default for "fd" args
perf report: Add srcline_from/to branch sort keys
perf evsel: Record fd into perf_mmap
perf evsel: Add overwrite attribute and check write_backward
perf tools: Set buildid dir under symfs when --symfs is provided
perf trace: Only auto set call-graph to "dwarf" when syscalls are being traced
perf annotate: Sort list of recognised instructions
perf annotate: Fix identification of ARM blt and bls instructions
perf tools: Fix usage of max_stack sysctl
perf callchain: Stop validating callchains by the max_stack sysctl
perf trace: Fix exit_group() formatting
perf top: Use machine->kptr_restrict_warned
perf trace: Warn when trying to resolve kernel addresses with kptr_restrict=1
perf machine: Do not bail out if not managing to read ref reloc symbol
perf/x86/intel/p4: Trival indentation fix, remove space
...
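
The common thread in the arch changes below is the call-chain size limiting
rework mentioned above: each architecture's perf_callchain_kernel() and
perf_callchain_user() hook now receives a struct perf_callchain_entry_ctx
rather than a bare struct perf_callchain_entry, bounds its stack walk with
entry->max_stack instead of the global sysctl_perf_event_max_stack, and (on
powerpc) records PERF_CONTEXT_* markers through perf_callchain_store_context()
so they can be accounted separately from real return addresses. A minimal
sketch of the shape of that API follows; the field layout and helper body are
simplified illustrations inferred from the hunks below, not the exact
definitions in include/linux/perf_event.h:

/* Simplified illustration only; the real kernel struct carries more state. */
typedef unsigned long long u64;
typedef unsigned int u32;

struct perf_callchain_entry {
        u64 nr;
        u64 ip[];                       /* return addresses being recorded */
};

struct perf_callchain_entry_ctx {
        struct perf_callchain_entry *entry;     /* buffer being filled */
        u32 max_stack;                          /* per-callchain frame limit */
        u32 nr;                                 /* frames stored so far */
};

/* Store one frame, honouring the per-context limit instead of the sysctl. */
static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
        if (ctx->nr < ctx->max_stack) {
                ctx->entry->ip[ctx->entry->nr++] = ip;
                ctx->nr++;
                return 0;
        }
        return -1;
}

Arch unwinders then loop on "while (entry->nr < entry->max_stack)" rather than
consulting sysctl_perf_event_max_stack directly, which is exactly the pattern
the hunks below convert to.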
Diffstat (limited to 'arch')
-rw-r--r--  arch/arc/kernel/perf_event.c        |  6
-rw-r--r--  arch/arm/kernel/perf_callchain.c    | 10
-rw-r--r--  arch/arm64/kernel/perf_callchain.c  | 14
-rw-r--r--  arch/metag/kernel/perf_callchain.c  | 10
-rw-r--r--  arch/mips/kernel/perf_event.c       | 12
-rw-r--r--  arch/powerpc/perf/callchain.c       | 20
-rw-r--r--  arch/s390/kernel/perf_event.c       |  4
-rw-r--r--  arch/sh/kernel/perf_callchain.c     |  4
-rw-r--r--  arch/sparc/kernel/perf_event.c      | 14
-rw-r--r--  arch/tile/kernel/perf_event.c       |  6
-rw-r--r--  arch/x86/events/core.c              | 14
-rw-r--r--  arch/x86/events/intel/p4.c          |  2
-rw-r--r--  arch/x86/events/intel/uncore.c      |  2
-rw-r--r--  arch/xtensa/kernel/perf_event.c     | 10
14 files changed, 64 insertions, 64 deletions
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 8b134cfe5e1f..6fd48021324b 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -48,7 +48,7 @@ struct arc_callchain_trace {
 static int callchain_trace(unsigned int addr, void *data)
 {
         struct arc_callchain_trace *ctrl = data;
-        struct perf_callchain_entry *entry = ctrl->perf_stuff;
+        struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
         perf_callchain_store(entry, addr);
 
         if (ctrl->depth++ < 3)
@@ -58,7 +58,7 @@ static int callchain_trace(unsigned int addr, void *data)
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         struct arc_callchain_trace ctrl = {
                 .depth = 0,
@@ -69,7 +69,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         /*
          * User stack can't be unwound trivially with kernel dwarf unwinder
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index 27563befa8a2..22bf1f64d99a 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -31,7 +31,7 @@ struct frame_tail {
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-               struct perf_callchain_entry *entry)
+               struct perf_callchain_entry_ctx *entry)
 {
         struct frame_tail buftail;
         unsigned long err;
@@ -59,7 +59,7 @@ user_backtrace(struct frame_tail __user *tail,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         struct frame_tail __user *tail;
 
@@ -75,7 +75,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
         tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
-        while ((entry->nr < sysctl_perf_event_max_stack) &&
+        while ((entry->nr < entry->max_stack) &&
                tail && !((unsigned long)tail & 0x3))
                 tail = user_backtrace(tail, entry);
 }
@@ -89,13 +89,13 @@ static int
 callchain_trace(struct stackframe *fr,
                 void *data)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
         perf_callchain_store(entry, fr->pc);
         return 0;
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         struct stackframe fr;
 
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 32c3c6e70119..713ca824f266 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -31,7 +31,7 @@ struct frame_tail {
  */
 static struct frame_tail __user *
 user_backtrace(struct frame_tail __user *tail,
-               struct perf_callchain_entry *entry)
+               struct perf_callchain_entry_ctx *entry)
 {
         struct frame_tail buftail;
         unsigned long err;
@@ -76,7 +76,7 @@ struct compat_frame_tail {
 
 static struct compat_frame_tail __user *
 compat_user_backtrace(struct compat_frame_tail __user *tail,
-                      struct perf_callchain_entry *entry)
+                      struct perf_callchain_entry_ctx *entry)
 {
         struct compat_frame_tail buftail;
         unsigned long err;
@@ -106,7 +106,7 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
 }
 #endif /* CONFIG_COMPAT */
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {
         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
@@ -122,7 +122,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 
                 tail = (struct frame_tail __user *)regs->regs[29];
 
-                while (entry->nr < sysctl_perf_event_max_stack &&
+                while (entry->nr < entry->max_stack &&
                        tail && !((unsigned long)tail & 0xf))
                         tail = user_backtrace(tail, entry);
         } else {
@@ -132,7 +132,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
 
                 tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
 
-                while ((entry->nr < sysctl_perf_event_max_stack) &&
+                while ((entry->nr < entry->max_stack) &&
                         tail && !((unsigned long)tail & 0x3))
                         tail = compat_user_backtrace(tail, entry);
 #endif
@@ -146,12 +146,12 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
  */
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
         perf_callchain_store(entry, frame->pc);
         return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
         struct stackframe frame;
diff --git a/arch/metag/kernel/perf_callchain.c b/arch/metag/kernel/perf_callchain.c
index 252abc12a5a3..3e8e048040df 100644
--- a/arch/metag/kernel/perf_callchain.c
+++ b/arch/metag/kernel/perf_callchain.c
@@ -29,7 +29,7 @@ static bool is_valid_call(unsigned long calladdr)
 
 static struct metag_frame __user *
 user_backtrace(struct metag_frame __user *user_frame,
-               struct perf_callchain_entry *entry)
+               struct perf_callchain_entry_ctx *entry)
 {
         struct metag_frame frame;
         unsigned long calladdr;
@@ -56,7 +56,7 @@ user_backtrace(struct metag_frame __user *user_frame,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         unsigned long sp = regs->ctx.AX[0].U0;
         struct metag_frame __user *frame;
@@ -65,7 +65,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
         --frame;
 
-        while ((entry->nr < sysctl_perf_event_max_stack) && frame)
+        while ((entry->nr < entry->max_stack) && frame)
                 frame = user_backtrace(frame, entry);
 }
 
@@ -78,13 +78,13 @@ static int
 callchain_trace(struct stackframe *fr,
                 void *data)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
         perf_callchain_store(entry, fr->pc);
         return 0;
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         struct stackframe fr;
 
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 5021c546ad07..d64056e0bb56 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -25,8 +25,8 @@
  * the user stack callchains, we will add it here.
  */
 
-static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
+static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
                                     unsigned long reg29)
 {
         unsigned long *sp = (unsigned long *)reg29;
         unsigned long addr;
@@ -35,14 +35,14 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
                 addr = *sp++;
                 if (__kernel_text_address(addr)) {
                         perf_callchain_store(entry, addr);
-                        if (entry->nr >= sysctl_perf_event_max_stack)
+                        if (entry->nr >= entry->max_stack)
                                 break;
                 }
         }
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
         unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
@@ -59,7 +59,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
         }
         do {
                 perf_callchain_store(entry, pc);
-                if (entry->nr >= sysctl_perf_event_max_stack)
+                if (entry->nr >= entry->max_stack)
                         break;
                 pc = unwind_stack(current, &sp, pc, &ra);
         } while (pc);
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 26d37e6f924e..0fc26714780a 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -47,7 +47,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
 }
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         unsigned long sp, next_sp;
         unsigned long next_ip;
@@ -76,7 +76,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
                         next_ip = regs->nip;
                         lr = regs->link;
                         level = 0;
-                        perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+                        perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);
 
                 } else {
                         if (level == 0)
@@ -232,7 +232,7 @@ static int sane_signal_64_frame(unsigned long sp)
                 puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                    struct pt_regs *regs)
 {
         unsigned long sp, next_sp;
@@ -247,7 +247,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
         sp = regs->gpr[1];
         perf_callchain_store(entry, next_ip);
 
-        while (entry->nr < sysctl_perf_event_max_stack) {
+        while (entry->nr < entry->max_stack) {
                 fp = (unsigned long __user *) sp;
                 if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
                         return;
@@ -274,7 +274,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
                             read_user_stack_64(&uregs[PT_R1], &sp))
                                 return;
                         level = 0;
-                        perf_callchain_store(entry, PERF_CONTEXT_USER);
+                        perf_callchain_store_context(entry, PERF_CONTEXT_USER);
                         perf_callchain_store(entry, next_ip);
                         continue;
                 }
@@ -319,7 +319,7 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
         return rc;
 }
 
-static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                           struct pt_regs *regs)
 {
 }
@@ -439,7 +439,7 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
         return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
                                    struct pt_regs *regs)
 {
         unsigned int sp, next_sp;
@@ -453,7 +453,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
         sp = regs->gpr[1];
         perf_callchain_store(entry, next_ip);
 
-        while (entry->nr < sysctl_perf_event_max_stack) {
+        while (entry->nr < entry->max_stack) {
                 fp = (unsigned int __user *) (unsigned long) sp;
                 if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
                         return;
@@ -473,7 +473,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
                             read_user_stack_32(&uregs[PT_R1], &sp))
                                 return;
                         level = 0;
-                        perf_callchain_store(entry, PERF_CONTEXT_USER);
+                        perf_callchain_store_context(entry, PERF_CONTEXT_USER);
                         perf_callchain_store(entry, next_ip);
                         continue;
                 }
@@ -487,7 +487,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         if (current_is_64bit())
                 perf_callchain_user_64(entry, regs);
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index c3e4099b60a5..87035fa58bbe 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -224,13 +224,13 @@ arch_initcall(service_level_perf_register);
 
 static int __perf_callchain_kernel(void *data, unsigned long address)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
 
         perf_callchain_store(entry, address);
         return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
         if (user_mode(regs))
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index cc80b614b5fa..fa2c0cd23eaa 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -21,7 +21,7 @@ static int callchain_stack(void *data, char *name)
 
 static void callchain_address(void *data, unsigned long addr, int reliable)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
 
         if (reliable)
                 perf_callchain_store(entry, addr);
@@ -33,7 +33,7 @@ static const struct stacktrace_ops callchain_ops = {
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         perf_callchain_store(entry, regs->pc);
 
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index a4b8b5aed21c..710f3278d448 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1711,7 +1711,7 @@ static int __init init_hw_perf_events(void)
 }
 pure_initcall(init_hw_perf_events);
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
         unsigned long ksp, fp;
@@ -1756,7 +1756,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
                         }
                 }
 #endif
-        } while (entry->nr < sysctl_perf_event_max_stack);
+        } while (entry->nr < entry->max_stack);
 }
 
 static inline int
@@ -1769,7 +1769,7 @@ valid_user_frame(const void __user *fp, unsigned long size)
         return (__range_not_ok(fp, size, TASK_SIZE) == 0);
 }
 
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
                                    struct pt_regs *regs)
 {
         unsigned long ufp;
@@ -1790,10 +1790,10 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
                 pc = sf.callers_pc;
                 ufp = (unsigned long)sf.fp + STACK_BIAS;
                 perf_callchain_store(entry, pc);
-        } while (entry->nr < sysctl_perf_event_max_stack);
+        } while (entry->nr < entry->max_stack);
 }
 
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
                                    struct pt_regs *regs)
 {
         unsigned long ufp;
@@ -1822,11 +1822,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
                         ufp = (unsigned long)sf.fp;
                 }
                 perf_callchain_store(entry, pc);
-        } while (entry->nr < sysctl_perf_event_max_stack);
+        } while (entry->nr < entry->max_stack);
 }
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         u64 saved_fault_address = current_thread_info()->fault_address;
         u8 saved_fault_code = get_thread_fault_code();
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index 8767060d70fb..6394c1ccb68e 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -941,7 +941,7 @@ arch_initcall(init_hw_perf_events);
 /*
  * Tile specific backtracing code for perf_events.
  */
-static inline void perf_callchain(struct perf_callchain_entry *entry,
+static inline void perf_callchain(struct perf_callchain_entry_ctx *entry,
                                   struct pt_regs *regs)
 {
         struct KBacktraceIterator kbt;
@@ -992,13 +992,13 @@ static inline void perf_callchain(struct perf_callchain_entry *entry,
         }
 }
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {
         perf_callchain(entry, regs);
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
         perf_callchain(entry, regs);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 73a75aa5a66d..33787ee817f0 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2202,7 +2202,7 @@ static int backtrace_stack(void *data, char *name)
 
 static int backtrace_address(void *data, unsigned long addr, int reliable)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
 
         return perf_callchain_store(entry, addr);
 }
@@ -2214,7 +2214,7 @@ static const struct stacktrace_ops backtrace_ops = {
 };
 
 void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                 /* TODO: We don't support guest os callchain now */
@@ -2268,7 +2268,7 @@ static unsigned long get_segment_base(unsigned int segment)
 #include <asm/compat.h>
 
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
         /* 32-bit process in 64-bit kernel. */
         unsigned long ss_base, cs_base;
@@ -2283,7 +2283,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 
         fp = compat_ptr(ss_base + regs->bp);
         pagefault_disable();
-        while (entry->nr < sysctl_perf_event_max_stack) {
+        while (entry->nr < entry->max_stack) {
                 unsigned long bytes;
                 frame.next_frame = 0;
                 frame.return_address = 0;
@@ -2309,14 +2309,14 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 }
 #else
 static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
 {
         return 0;
 }
 #endif
 
 void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
         struct stack_frame frame;
         const void __user *fp;
@@ -2343,7 +2343,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
                 return;
 
         pagefault_disable();
-        while (entry->nr < sysctl_perf_event_max_stack) {
+        while (entry->nr < entry->max_stack) {
                 unsigned long bytes;
                 frame.next_frame = NULL;
                 frame.return_address = 0;
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index 0a5ede187d9c..eb0533558c2b 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -826,7 +826,7 @@ static int p4_hw_config(struct perf_event *event)
          * Clear bits we reserve to be managed by kernel itself
          * and never allowed from a user space
          */
-         event->attr.config &= P4_CONFIG_MASK;
+        event->attr.config &= P4_CONFIG_MASK;
 
         rc = p4_validate_raw_event(event);
         if (rc)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 16c178916412..fce74062d981 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -891,7 +891,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
                 return -ENODEV;
 
         pkg = topology_phys_to_logical_pkg(phys_id);
-        if (WARN_ON_ONCE(pkg < 0))
+        if (pkg < 0)
                 return -EINVAL;
 
         if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index a6b00b3af429..ef90479e0397 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -323,23 +323,23 @@ static void xtensa_pmu_read(struct perf_event *event)
 
 static int callchain_trace(struct stackframe *frame, void *data)
 {
-        struct perf_callchain_entry *entry = data;
+        struct perf_callchain_entry_ctx *entry = data;
 
         perf_callchain_store(entry, frame->pc);
         return 0;
 }
 
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
                            struct pt_regs *regs)
 {
-        xtensa_backtrace_kernel(regs, sysctl_perf_event_max_stack,
+        xtensa_backtrace_kernel(regs, entry->max_stack,
                                 callchain_trace, NULL, entry);
 }
 
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                          struct pt_regs *regs)
 {
-        xtensa_backtrace_user(regs, sysctl_perf_event_max_stack,
+        xtensa_backtrace_user(regs, entry->max_stack,
                               callchain_trace, entry);
 }
 