author     Peter Zijlstra <peterz@infradead.org>   2016-08-04 08:37:24 -0400
committer  Ingo Molnar <mingo@kernel.org>          2016-08-10 07:05:51 -0400
commit     0b8f1e2e26bfc6b9abe3f0f3faba2cb0eecb9fb9 (patch)
tree       5f645289667742567e9a76696a435b48b2db55c5 /kernel/events
parent     69766c40c35de3a2c1142c43baa9951dfe11f002 (diff)
perf/core: Fix sideband list-iteration vs. event ordering NULL pointer dereference crash
Vegard Nossum reported that perf fuzzing generates a NULL pointer
dereference crash:

> Digging a bit deeper into this, it seems the event itself is getting
> created by perf_event_open() and it gets added to the pmu_event_list
> through:
>
> perf_event_open()
> - perf_event_alloc()
> - account_event()
> - account_pmu_sb_event()
> - attach_sb_event()
>
> so at this point the event is being attached but its ->ctx is still
> NULL. It seems like ->ctx is set just a bit later in
> perf_event_open(), though.
>
> But before that, __schedule() comes along and creates a stack trace
> similar to the one above:
>
> __schedule()
> - __perf_event_task_sched_out()
> - perf_iterate_sb()
> - perf_iterate_sb_cpu()
> - event_filter_match()
> - perf_cgroup_match()
> - __get_cpu_context()
> - (dereference ctx which is NULL)
>
> So I guess the question is... should the event be attached (= put on
> the list) before ->ctx gets set? Or should the cgroup code check for a
> NULL ->ctx?

The latter seems like the simplest solution. Moving the list-add later
creates a bit of a mess.

Reported-by: Vegard Nossum <vegard.nossum@gmail.com>
Tested-by: Vegard Nossum <vegard.nossum@gmail.com>
Tested-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: f2fb6bef9251 ("perf/core: Optimize side-band event delivery")
Link: http://lkml.kernel.org/r/20160804123724.GN6862@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
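The fix below publishes event->ctx with smp_store_release() and has the sideband
iterator read it back with smp_load_acquire(), so a reader that sees a non-NULL
->ctx is also guaranteed to see all the initialization done before the pointer was
published. As a minimal illustration of that release/acquire publication pattern,
here is a userspace sketch using C11 atomics in place of the kernel primitives;
all struct fields and function names in it are hypothetical stand-ins, not the
actual perf code.

/*
 * Userspace sketch of the release/acquire publication pattern used by the
 * fix. C11 atomics stand in for smp_store_release()/smp_load_acquire();
 * every name here is hypothetical and only illustrates the ordering.
 */
#include <stdatomic.h>
#include <stdio.h>

struct ctx {
	int cpu;
};

struct event {
	_Atomic(struct ctx *) ctx;	/* NULL until the event is fully formed */
	int state;
};

static struct ctx the_ctx = { .cpu = 0 };
static struct event the_event;		/* ->ctx starts out NULL */

/* Publisher: finish initializing, then release-store the ctx pointer. */
static void install_in_context(struct event *e, struct ctx *c)
{
	e->state = 1;
	/* Everything written above is visible to any acquire-load of ->ctx. */
	atomic_store_explicit(&e->ctx, c, memory_order_release);
}

/* Consumer: skip events whose ctx has not been published yet. */
static void iterate_sb_cpu(struct event *e)
{
	struct ctx *c = atomic_load_explicit(&e->ctx, memory_order_acquire);

	if (!c)
		return;		/* not fully formed yet; do not dereference */

	printf("event state=%d, ctx cpu=%d\n", e->state, c->cpu);
}

int main(void)
{
	iterate_sb_cpu(&the_event);		/* skipped: ->ctx still NULL */
	install_in_context(&the_event, &the_ctx);
	iterate_sb_cpu(&the_event);		/* safe: publication observed */
	return 0;
}

In the patch itself the same rule applies: perf_iterate_sb_cpu() must not touch
anything reached through event->ctx until its acquire-load has observed the
store-release performed in perf_install_in_context().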
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c  23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a19550d80ab1..87d02b8cb87e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1716,8 +1716,8 @@ static inline int pmu_filter_match(struct perf_event *event)
 static inline int
 event_filter_match(struct perf_event *event)
 {
-	return (event->cpu == -1 || event->cpu == smp_processor_id())
-	    && perf_cgroup_match(event) && pmu_filter_match(event);
+	return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
+	       perf_cgroup_match(event) && pmu_filter_match(event);
 }
 
 static void
@@ -1737,8 +1737,8 @@ event_sched_out(struct perf_event *event,
 	 * maintained, otherwise bogus information is return
 	 * via read() for time_enabled, time_running:
 	 */
-	if (event->state == PERF_EVENT_STATE_INACTIVE
-	    && !event_filter_match(event)) {
+	if (event->state == PERF_EVENT_STATE_INACTIVE &&
+	    !event_filter_match(event)) {
 		delta = tstamp - event->tstamp_stopped;
 		event->tstamp_running += delta;
 		event->tstamp_stopped = tstamp;
@@ -2236,10 +2236,15 @@ perf_install_in_context(struct perf_event_context *ctx,
 
 	lockdep_assert_held(&ctx->mutex);
 
-	event->ctx = ctx;
 	if (event->cpu != -1)
 		event->cpu = cpu;
 
+	/*
+	 * Ensures that if we can observe event->ctx, both the event and ctx
+	 * will be 'complete'. See perf_iterate_sb_cpu().
+	 */
+	smp_store_release(&event->ctx, ctx);
+
 	if (!task) {
 		cpu_function_call(cpu, __perf_install_in_context, event);
 		return;
@@ -5969,6 +5974,14 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
 	struct perf_event *event;
 
 	list_for_each_entry_rcu(event, &pel->list, sb_list) {
+		/*
+		 * Skip events that are not fully formed yet; ensure that
+		 * if we observe event->ctx, both event and ctx will be
+		 * complete enough. See perf_install_in_context().
+		 */
+		if (!smp_load_acquire(&event->ctx))
+			continue;
+
 		if (event->state < PERF_EVENT_STATE_INACTIVE)
 			continue;
 		if (!event_filter_match(event))