author    Suzuki K. Poulose <suzuki.poulose@arm.com>    2015-03-17 14:14:58 -0400
committer Will Deacon <will.deacon@arm.com>             2015-03-19 15:45:22 -0400
commit    e429817b401f095ac483fcb02524b01faf45dad6 (patch)
tree      39073a4899724f3c676bd8a390b611c39037e52b
parent    341e42c4e3f97af9bbeada64c3e1a41f65ce086a (diff)
ARM: perf: reject groups spanning multiple hardware PMUs
The perf core implicitly rejects events spanning multiple HW PMUs, as in
these cases the event->ctx will differ. However, this validation is
performed after pmu::event_init() is called in perf_init_event(), and
thus pmu::event_init() may be called with a group leader from a
different HW PMU.

The ARM PMU driver does not take this fact into account, and when
validating groups assumes that it can call to_arm_pmu(event->pmu) for
any HW event. When the event in question is from another HW PMU this is
wrong, and results in dereferencing garbage.

This patch updates the ARM PMU driver to first test for and reject
events from other PMUs, moving the to_arm_pmu and related logic after
this test. It fixes a crash triggered by perf_fuzzer on Linux 4.0-rc2,
with a CCI PMU present:

---
CPU: 0 PID: 1527 Comm: perf_fuzzer Not tainted 4.0.0-rc2 #57
Hardware name: ARM-Versatile Express
task: bd8484c0 ti: be676000 task.ti: be676000
PC is at 0xbf1bbc90
LR is at validate_event+0x34/0x5c
pc : [<bf1bbc90>]    lr : [<80016060>]    psr: 00000013
...
[<80016060>] (validate_event) from [<80016198>] (validate_group+0x28/0x90)
[<80016198>] (validate_group) from [<80016398>] (armpmu_event_init+0x150/0x218)
[<80016398>] (armpmu_event_init) from [<800882e4>] (perf_try_init_event+0x30/0x48)
[<800882e4>] (perf_try_init_event) from [<8008f544>] (perf_init_event+0x5c/0xf4)
[<8008f544>] (perf_init_event) from [<8008f8a8>] (perf_event_alloc+0x2cc/0x35c)
[<8008f8a8>] (perf_event_alloc) from [<8009015c>] (SyS_perf_event_open+0x498/0xa70)
[<8009015c>] (SyS_perf_event_open) from [<8000e420>] (ret_fast_syscall+0x0/0x34)
Code: bf1be000 bf1bb380 802a2664 00000000 (00000002)
---[ end trace 01aff0ff00926a0a ]---

This also cleans up the code to use the arm_pmu only once we know that
we are dealing with an arm_pmu event.

Cc: Will Deacon <will.deacon@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Suzuki K. Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
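To make the failure mode concrete, here is a minimal userspace sketch (not
part of the patch) that builds such a cross-PMU group via perf_event_open(2):
a CPU-cycles event on the CPU PMU as group leader, with a sibling from a
second hardware PMU. The CCI sysfs name (CCI_400) and the event encoding
(0x63) are illustrative assumptions and vary by platform; with this patch
applied, the second perf_event_open() fails cleanly instead of the kernel
dereferencing garbage.

/*
 * Hypothetical reproducer sketch: open a CPU PMU event, then try to
 * group an event from a different HW PMU (the CCI) under it. Prior to
 * this patch, validate_event() called to_arm_pmu() on the CCI event
 * and dereferenced garbage; with the patch, the group is rejected.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/* Read a dynamic PMU type id from sysfs; the path is platform-dependent. */
static int read_pmu_type(const char *path)
{
	FILE *f = fopen(path, "r");
	int type = -1;

	if (f) {
		if (fscanf(f, "%d", &type) != 1)
			type = -1;
		fclose(f);
	}
	return type;
}

int main(void)
{
	struct perf_event_attr attr;
	int leader, sibling, cci_type;

	/* Group leader: CPU cycles on the CPU PMU, bound to CPU 0. */
	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	leader = perf_event_open(&attr, -1, 0, -1, 0);
	if (leader < 0) {
		perror("leader");
		return 1;
	}

	/* Sibling from a different HW PMU (illustrative sysfs name). */
	cci_type = read_pmu_type("/sys/bus/event_source/devices/CCI_400/type");
	if (cci_type < 0) {
		fprintf(stderr, "no CCI PMU found\n");
		return 1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = cci_type;
	attr.size = sizeof(attr);
	attr.config = 0x63;	/* illustrative CCI event encoding */
	sibling = perf_event_open(&attr, -1, 0, leader, 0);
	if (sibling < 0)
		perror("sibling (cross-PMU group, expected to fail)");

	close(leader);
	return 0;
}

Run with sufficient privileges (e.g. kernel.perf_event_paranoid lowered) on a
platform that actually exposes a CCI PMU.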
-rw-r--r--  arch/arm/kernel/perf_event.c | 21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 557e128e4df0..4a86a0133ac3 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -259,20 +259,29 @@ out:
 }
 
 static int
-validate_event(struct pmu_hw_events *hw_events,
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
 	       struct perf_event *event)
 {
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct arm_pmu *armpmu;
 
 	if (is_software_event(event))
 		return 1;
 
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != pmu)
+		return 0;
+
 	if (event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
+	armpmu = to_arm_pmu(event->pmu);
 	return armpmu->get_event_idx(hw_events, event) >= 0;
 }
 
@@ -288,15 +297,15 @@ validate_group(struct perf_event *event)
 	 */
 	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
 
-	if (!validate_event(&fake_pmu, leader))
+	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(&fake_pmu, sibling))
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
 
-	if (!validate_event(&fake_pmu, event))
+	if (!validate_event(event->pmu, &fake_pmu, event))
 		return -EINVAL;
 
 	return 0;