diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-26 18:38:07 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-09-26 18:38:07 -0400 |
commit | a7b7b772bb4abaa4b2d9df67b50bf7208203da82 (patch) | |
tree | 9c3be8daafe4f5547b3470914903a683a04dfe6b /kernel/events | |
parent | 7897c04ad09f815aea1f2dbb05825887d4494a74 (diff) | |
parent | 26acf400d2dcc72c7e713e1f55db47ad92010cc2 (diff) |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull more perf updates from Ingo Molnar:
"The only kernel change is comment typo fixes.
The rest is mostly tooling fixes, but also new vendor event additions
and updates, a bigger libperf/libtraceevent library and a header files
reorganization that came in a bit late"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (108 commits)
perf unwind: Fix libunwind build failure on i386 systems
perf parser: Remove needless include directives
perf build: Add detection of java-11-openjdk-devel package
perf jvmti: Include JVMTI support for s390
perf vendor events: Remove P8 HW events which are not supported
perf evlist: Fix access of freed id arrays
perf stat: Fix free memory access / memory leaks in metrics
perf tools: Replace needless mmap.h with what is needed, event.h
perf evsel: Move config terms to a separate header
perf evlist: Remove unused perf_evlist__fprintf() method
perf evsel: Introduce evsel_fprintf.h
perf evsel: Remove need for symbol_conf in evsel_fprintf.c
perf copyfile: Move copyfile routines to separate files
libperf: Add perf_evlist__poll() function
libperf: Add perf_evlist__add_pollfd() function
libperf: Add perf_evlist__alloc_pollfd() function
libperf: Add libperf_init() call to the tests
libperf: Merge libperf_set_print() into libperf_init()
libperf: Add libperf dependency for tests targets
libperf: Use sys/types.h to get ssize_t, not unistd.h
...
Diffstat (limited to 'kernel/events')
-rw-r--r-- | kernel/events/core.c | 6 |
1 file changed, 3 insertions, 3 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c index 4f08b17d6426..275eae05af20 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -2239,7 +2239,7 @@ static void __perf_event_disable(struct perf_event *event, | |||
2239 | * | 2239 | * |
2240 | * If event->ctx is a cloned context, callers must make sure that | 2240 | * If event->ctx is a cloned context, callers must make sure that |
2241 | * every task struct that event->ctx->task could possibly point to | 2241 | * every task struct that event->ctx->task could possibly point to |
2242 | * remains valid. This condition is satisifed when called through | 2242 | * remains valid. This condition is satisfied when called through |
2243 | * perf_event_for_each_child or perf_event_for_each because they | 2243 | * perf_event_for_each_child or perf_event_for_each because they |
2244 | * hold the top-level event's child_mutex, so any descendant that | 2244 | * hold the top-level event's child_mutex, so any descendant that |
2245 | * goes to exit will block in perf_event_exit_event(). | 2245 | * goes to exit will block in perf_event_exit_event(). |
@@ -6054,7 +6054,7 @@ static void perf_sample_regs_intr(struct perf_regs *regs_intr, | |||
6054 | * Get remaining task size from user stack pointer. | 6054 | * Get remaining task size from user stack pointer. |
6055 | * | 6055 | * |
6056 | * It'd be better to take stack vma map and limit this more | 6056 | * It'd be better to take stack vma map and limit this more |
6057 | * precisly, but there's no way to get it safely under interrupt, | 6057 | * precisely, but there's no way to get it safely under interrupt, |
6058 | * so using TASK_SIZE as limit. | 6058 | * so using TASK_SIZE as limit. |
6059 | */ | 6059 | */ |
6060 | static u64 perf_ustack_task_size(struct pt_regs *regs) | 6060 | static u64 perf_ustack_task_size(struct pt_regs *regs) |
@@ -6616,7 +6616,7 @@ void perf_prepare_sample(struct perf_event_header *header, | |||
6616 | 6616 | ||
6617 | if (sample_type & PERF_SAMPLE_STACK_USER) { | 6617 | if (sample_type & PERF_SAMPLE_STACK_USER) { |
6618 | /* | 6618 | /* |
6619 | * Either we need PERF_SAMPLE_STACK_USER bit to be allways | 6619 | * Either we need PERF_SAMPLE_STACK_USER bit to be always |
6620 | * processed as the last one or have additional check added | 6620 | * processed as the last one or have additional check added |
6621 | * in case new sample type is added, because we could eat | 6621 | * in case new sample type is added, because we could eat |
6622 | * up the rest of the sample size. | 6622 | * up the rest of the sample size. |