about summary refs log tree commit diff stats
path: root/arch/arm64/kernel
diff options
context:
space:
mode:
author: Suzuki K. Poulose <suzuki.poulose@arm.com> 2015-03-17 14:14:59 -0400
committer: Will Deacon <will.deacon@arm.com> 2015-03-19 15:45:51 -0400
commit: 8fff105e13041e49b82f92eef034f363a6b1c071 (patch)
tree: 3703b93defc5477c9dddfa86e4fdab9d3171f9e9 /arch/arm64/kernel
parent: 4a97abd44329bf7b9c57f020224da5f823c9c9ea (diff)
arm64: perf: reject groups spanning multiple HW PMUs
The perf core implicitly rejects events spanning multiple HW PMUs, as in these cases the event->ctx will differ. However this validation is performed after pmu::event_init() is called in perf_init_event(), and thus pmu::event_init() may be called with a group leader from a different HW PMU. The ARM64 PMU driver does not take this fact into account, and when validating groups assumes that it can call to_arm_pmu(event->pmu) for any HW event. When the event in question is from another HW PMU this is wrong, and results in dereferencing garbage. This patch updates the ARM64 PMU driver to first test for and reject events from other PMUs, moving the to_arm_pmu and related logic after this test. Fixes a crash triggered by perf_fuzzer on Linux-4.0-rc2, with a CCI PMU present: Bad mode in Synchronous Abort handler detected, code 0x86000006 -- IABT (current EL) CPU: 0 PID: 1371 Comm: perf_fuzzer Not tainted 3.19.0+ #249 Hardware name: V2F-1XV7 Cortex-A53x2 SMM (DT) task: ffffffc07c73a280 ti: ffffffc07b0a0000 task.ti: ffffffc07b0a0000 PC is at 0x0 LR is at validate_event+0x90/0xa8 pc : [<0000000000000000>] lr : [<ffffffc000090228>] pstate: 00000145 sp : ffffffc07b0a3ba0 [< (null)>] (null) [<ffffffc0000907d8>] armpmu_event_init+0x174/0x3cc [<ffffffc00015d870>] perf_try_init_event+0x34/0x70 [<ffffffc000164094>] perf_init_event+0xe0/0x10c [<ffffffc000164348>] perf_event_alloc+0x288/0x358 [<ffffffc000164c5c>] SyS_perf_event_open+0x464/0x98c Code: bad PC value Also cleans up the code to use the arm_pmu only when we know that we are dealing with an arm pmu event. Cc: Will Deacon <will.deacon@arm.com> Acked-by: Mark Rutland <mark.rutland@arm.com> Acked-by: Peter Ziljstra (Intel) <peterz@infradead.org> Signed-off-by: Suzuki K. Poulose <suzuki.poulose@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/perf_event.c | 21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 25a5308744b1..68a74151fa6c 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -322,22 +322,31 @@ out:
 }
 
 static int
-validate_event(struct pmu_hw_events *hw_events,
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
 	       struct perf_event *event)
 {
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct arm_pmu *armpmu;
 	struct hw_perf_event fake_event = event->hw;
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
 	if (is_software_event(event))
 		return 1;
 
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != pmu)
+		return 0;
+
 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
+	armpmu = to_arm_pmu(event->pmu);
 	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
 }
 
@@ -355,15 +364,15 @@ validate_group(struct perf_event *event)
 	memset(fake_used_mask, 0, sizeof(fake_used_mask));
 	fake_pmu.used_mask = fake_used_mask;
 
-	if (!validate_event(&fake_pmu, leader))
+	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(&fake_pmu, sibling))
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
 
-	if (!validate_event(&fake_pmu, event))
+	if (!validate_event(event->pmu, &fake_pmu, event))
 		return -EINVAL;
 
 	return 0;