diff options
author    Peter Zijlstra <peterz@infradead.org>  2017-01-31 05:27:10 -0500
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2017-02-14 18:25:43 -0500
commit    e5c2e51470c2aadaf53a17acb677bb95529ac4d1 (patch)
tree      446b92a70a45c20b418d37772be1ca60e9891a1e /kernel
parent    de65c300c7f0d1e18f5a92ea7a6c1cc9a0734202 (diff)
perf/core: Fix crash in perf_event_read()
commit 451d24d1e5f40bad000fa9abe36ddb16fc9928cb upstream.
Alexei had his box explode because doing read() on a package
(rapl/uncore) event that isn't currently scheduled in ends up doing an
out-of-bounds load.
Rework the code to more explicitly deal with event->oncpu being -1.
Reported-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Tested-by: Alexei Starovoitov <ast@kernel.org>
Tested-by: David Carrillo-Cisneros <davidcc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: eranian@google.com
Fixes: d6a2f9035bfc ("perf/core: Introduce PMU_EV_CAP_READ_ACTIVE_PKG")
Link: http://lkml.kernel.org/r/20170131102710.GL6515@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c | 25 +++++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b1cfd7416db0..4b3323151a2f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3461,14 +3461,15 @@ struct perf_read_data {
 	int ret;
 };
 
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
-	int event_cpu = event->oncpu;
 	u16 local_pkg, event_pkg;
 
 	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-		event_pkg = topology_physical_package_id(event_cpu);
-		local_pkg = topology_physical_package_id(local_cpu);
+		int local_cpu = smp_processor_id();
+
+		event_pkg = topology_physical_package_id(event_cpu);
+		local_pkg = topology_physical_package_id(local_cpu);
 
 		if (event_pkg == local_pkg)
 			return local_cpu;
@@ -3598,7 +3599,7 @@ u64 perf_event_read_local(struct perf_event *event)
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-	int ret = 0, cpu_to_read, local_cpu;
+	int event_cpu, ret = 0;
 
 	/*
 	 * If event is enabled and currently active on a CPU, update the
@@ -3611,21 +3612,25 @@ static int perf_event_read(struct perf_event *event, bool group)
 			.ret = 0,
 		};
 
-		local_cpu = get_cpu();
-		cpu_to_read = find_cpu_to_read(event, local_cpu);
-		put_cpu();
+		event_cpu = READ_ONCE(event->oncpu);
+		if ((unsigned)event_cpu >= nr_cpu_ids)
+			return 0;
+
+		preempt_disable();
+		event_cpu = __perf_event_read_cpu(event, event_cpu);
 
 		/*
 		 * Purposely ignore the smp_call_function_single() return
 		 * value.
 		 *
-		 * If event->oncpu isn't a valid CPU it means the event got
+		 * If event_cpu isn't a valid CPU it means the event got
 		 * scheduled out and that will have updated the event count.
 		 *
 		 * Therefore, either way, we'll have an up-to-date event count
 		 * after this.
 		 */
-		(void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+		preempt_enable();
 		ret = data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;