author     Peter Zijlstra <peterz@infradead.org>  2017-01-31 05:27:10 -0500
committer  Ingo Molnar <mingo@kernel.org>         2017-02-10 03:04:50 -0500
commit     451d24d1e5f40bad000fa9abe36ddb16fc9928cb
tree       532b1fe208f3223a3b447f535f4a761f08c2527a
parent     53e74a112ce5c1c9b6a6923bdd6612133625d579
perf/core: Fix crash in perf_event_read()
Alexei had his box explode because doing read() on a package
(rapl/uncore) event that isn't currently scheduled in ends up doing an
out-of-bounds load.
Rework the code to more explicitly deal with event->oncpu being -1.
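The new check relies on the standard idiom that casting a signed CPU
number to unsigned maps -1 (the "not scheduled" value) far above
nr_cpu_ids, so a single comparison rejects both an inactive event and
an out-of-range CPU number. A minimal userspace sketch of that idiom
(illustrative only; NR_CPU_IDS stands in for the kernel's nr_cpu_ids):

  #include <stdio.h>

  #define NR_CPU_IDS 8u  /* stand-in for the kernel's nr_cpu_ids */

  /* Mirrors the patch's "(unsigned)event_cpu >= nr_cpu_ids" test. */
  static int cpu_is_valid(int cpu)
  {
          return (unsigned)cpu < NR_CPU_IDS;
  }

  int main(void)
  {
          printf("%d\n", cpu_is_valid(3));   /* 1: CPU in range        */
          printf("%d\n", cpu_is_valid(-1));  /* 0: event scheduled out */
          printf("%d\n", cpu_is_valid(99));  /* 0: out of range        */
          return 0;
  }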
Reported-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Tested-by: Alexei Starovoitov <ast@kernel.org>
Tested-by: David Carrillo-Cisneros <davidcc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: eranian@google.com
Fixes: d6a2f9035bfc ("perf/core: Introduce PMU_EV_CAP_READ_ACTIVE_PKG")
Link: http://lkml.kernel.org/r/20170131102710.GL6515@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 kernel/events/core.c | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e5aaa806702d..e235bb991bdd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3487,14 +3487,15 @@ struct perf_read_data {
 	int ret;
 };
 
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
-	int event_cpu = event->oncpu;
 	u16 local_pkg, event_pkg;
 
 	if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-		event_pkg = topology_physical_package_id(event_cpu);
-		local_pkg = topology_physical_package_id(local_cpu);
+		int local_cpu = smp_processor_id();
+
+		event_pkg = topology_physical_package_id(event_cpu);
+		local_pkg = topology_physical_package_id(local_cpu);
 
 		if (event_pkg == local_pkg)
 			return local_cpu;
@@ -3624,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event)
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-	int ret = 0, cpu_to_read, local_cpu;
+	int event_cpu, ret = 0;
 
 	/*
 	 * If event is enabled and currently active on a CPU, update the
@@ -3637,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group)
 			.ret = 0,
 		};
 
-		local_cpu = get_cpu();
-		cpu_to_read = find_cpu_to_read(event, local_cpu);
-		put_cpu();
+		event_cpu = READ_ONCE(event->oncpu);
+		if ((unsigned)event_cpu >= nr_cpu_ids)
+			return 0;
+
+		preempt_disable();
+		event_cpu = __perf_event_read_cpu(event, event_cpu);
 
 		/*
 		 * Purposely ignore the smp_call_function_single() return
 		 * value.
 		 *
-		 * If event->oncpu isn't a valid CPU it means the event got
+		 * If event_cpu isn't a valid CPU it means the event got
 		 * scheduled out and that will have updated the event count.
 		 *
 		 * Therefore, either way, we'll have an up-to-date event count
 		 * after this.
 		 */
-		(void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+		(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+		preempt_enable();
 		ret = data.ret;
 	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
 		struct perf_event_context *ctx = event->ctx;
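Note that the fixed path samples event->oncpu exactly once with
READ_ONCE() and only ever acts on that snapshot; re-reading the field
after the bounds check would reopen the window for the event to be
scheduled out in between. A userspace sketch of the same
read-once-then-validate pattern (names illustrative; C11 atomics stand
in for READ_ONCE()):

  #include <stdatomic.h>
  #include <stdio.h>

  #define NR_CPU_IDS 8u  /* stand-in for the kernel's nr_cpu_ids */

  /* Concurrently set to -1 when the event is scheduled out. */
  static _Atomic int oncpu = 3;

  static void read_event(void)
  {
          /* Sample once; every later use sees this same snapshot. */
          int cpu = atomic_load_explicit(&oncpu, memory_order_relaxed);

          if ((unsigned)cpu >= NR_CPU_IDS) {
                  printf("not active, count already up to date\n");
                  return;
          }
          /* ... would IPI 'cpu' here; never re-read 'oncpu' ... */
          printf("reading on cpu %d\n", cpu);
  }

  int main(void)
  {
          read_event();                  /* reading on cpu 3 */
          atomic_store(&oncpu, -1);      /* event scheduled out */
          read_event();                  /* not active */
          return 0;
  }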