about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorJiri Olsa <jolsa@kernel.org>2014-12-10 15:23:50 -0500
committerIngo Molnar <mingo@kernel.org>2014-12-11 05:24:14 -0500
commitaf91568e762d04931dcbdd6bef4655433d8b9418 (patch)
tree2a2924dfc908ec50ebeb94a9e9840272676592f3 /arch
parent201e7deb9062bb9a8a5bc113e36457c3ff55b226 (diff)
perf/x86/intel/uncore: Make sure only uncore events are collected
The uncore_collect_events function assumes that an event group might contain only uncore events, which is wrong, because it might contain any type of events. This bug leads to uncore framework touching 'not' uncore events, which could end up in all sorts of bugs. One was triggered by Vince's perf fuzzer, when the uncore code touched breakpoint event private event space as if it was uncore event and caused BUG: BUG: unable to handle kernel paging request at ffffffff82822068 IP: [<ffffffff81020338>] uncore_assign_events+0x188/0x250 ... The code in uncore_assign_events() function was looking for event->hw.idx data while the event was initialized as a breakpoint with different members in event->hw union. This patch forces uncore_collect_events() to collect only uncore events. Reported-by: Vince Weaver <vince@deater.net> Signed-off-by: Jiri Olsa <jolsa@redhat.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Cc: Yan, Zheng <zheng.z.yan@intel.com> Cc: <stable@vger.kernel.org> Link: http://lkml.kernel.org/r/1418243031-20367-2-git-send-email-jolsa@kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c22
1 file changed, 19 insertions, 3 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 9762dbd9f3f7..e98f68cfea02 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -276,6 +276,17 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
276 return box; 276 return box;
277} 277}
278 278
279/*
280 * Using uncore_pmu_event_init pmu event_init callback
281 * as a detection point for uncore events.
282 */
283static int uncore_pmu_event_init(struct perf_event *event);
284
285static bool is_uncore_event(struct perf_event *event)
286{
287 return event->pmu->event_init == uncore_pmu_event_init;
288}
289
279static int 290static int
280uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp) 291uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
281{ 292{
@@ -290,13 +301,18 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, b
290 return -EINVAL; 301 return -EINVAL;
291 302
292 n = box->n_events; 303 n = box->n_events;
293 box->event_list[n] = leader; 304
294 n++; 305 if (is_uncore_event(leader)) {
306 box->event_list[n] = leader;
307 n++;
308 }
309
295 if (!dogrp) 310 if (!dogrp)
296 return n; 311 return n;
297 312
298 list_for_each_entry(event, &leader->sibling_list, group_entry) { 313 list_for_each_entry(event, &leader->sibling_list, group_entry) {
299 if (event->state <= PERF_EVENT_STATE_OFF) 314 if (!is_uncore_event(event) ||
315 event->state <= PERF_EVENT_STATE_OFF)
300 continue; 316 continue;
301 317
302 if (n >= max_count) 318 if (n >= max_count)