about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    Alexander Shishkin <alexander.shishkin@linux.intel.com>  2019-07-01 07:07:55 -0400
committer Ingo Molnar <mingo@kernel.org>  2019-07-13 05:21:28 -0400
commit    8a58ddae23796c733c5dfbd717538d89d036c5bd (patch)
tree      c91e4da989176752c56cd7ae489375fd7898b785
parent    2f217d58a8a086d3399fecce39fb358848e799c4 (diff)
perf/core: Fix exclusive events' grouping
So far, we tried to disallow grouping exclusive events for the fear of
complications they would cause with moving between contexts. Specifically,
moving a software group to a hardware context would violate the
exclusivity rules if both groups contain matching exclusive events.

This attempt was, however, unsuccessful: the check that we have in the
perf_event_open() syscall is both wrong (looks at wrong PMU) and
insufficient (group leader may still be exclusive), as can be illustrated
by running:

  $ perf record -e '{intel_pt//,cycles}' uname
  $ perf record -e '{cycles,intel_pt//}' uname

ultimately successfully.

Furthermore, we are completely free to trigger the exclusivity violation
by:

  perf -e '{cycles,intel_pt//}' -e '{intel_pt//,instructions}'

even though the helpful perf record will not allow that, the ABI will.

The warning later in the perf_event_open() path will also not trigger,
because it's also wrong.

Fix all this by validating the original group before moving, getting rid
of broken safeguards and placing a useful one to
perf_install_in_context().

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <stable@vger.kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: mathieu.poirier@linaro.org
Cc: will.deacon@arm.com
Fixes: bed5b25ad9c8a ("perf: Add a pmu capability for "exclusive" events")
Link: https://lkml.kernel.org/r/20190701110755.24646-1-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 include/linux/perf_event.h |  5 +++++
 kernel/events/core.c       | 34 +++++++++++++++++++-----------
 2 files changed, 27 insertions(+), 12 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 16e38c286d46..e8ad3c590a23 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1055,6 +1055,11 @@ static inline int in_software_context(struct perf_event *event)
 	return event->ctx->pmu->task_ctx_nr == perf_sw_context;
 }
 
+static inline int is_exclusive_pmu(struct pmu *pmu)
+{
+	return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
+}
+
 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5dd19bedbf64..eea9d52b010c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2553,6 +2553,9 @@ unlock:
 	return ret;
 }
 
+static bool exclusive_event_installable(struct perf_event *event,
+					struct perf_event_context *ctx);
+
 /*
  * Attach a performance event to a context.
  *
@@ -2567,6 +2570,8 @@ perf_install_in_context(struct perf_event_context *ctx,
 
 	lockdep_assert_held(&ctx->mutex);
 
+	WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
+
 	if (event->cpu != -1)
 		event->cpu = cpu;
 
@@ -4360,7 +4365,7 @@ static int exclusive_event_init(struct perf_event *event)
 {
 	struct pmu *pmu = event->pmu;
 
-	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+	if (!is_exclusive_pmu(pmu))
 		return 0;
 
 	/*
@@ -4391,7 +4396,7 @@ static void exclusive_event_destroy(struct perf_event *event)
 {
 	struct pmu *pmu = event->pmu;
 
-	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+	if (!is_exclusive_pmu(pmu))
 		return;
 
 	/* see comment in exclusive_event_init() */
@@ -4411,14 +4416,15 @@ static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
 	return false;
 }
 
-/* Called under the same ctx::mutex as perf_install_in_context() */
 static bool exclusive_event_installable(struct perf_event *event,
 					struct perf_event_context *ctx)
 {
 	struct perf_event *iter_event;
 	struct pmu *pmu = event->pmu;
 
-	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+	lockdep_assert_held(&ctx->mutex);
+
+	if (!is_exclusive_pmu(pmu))
 		return true;
 
 	list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
@@ -10947,11 +10953,6 @@ SYSCALL_DEFINE5(perf_event_open,
 		goto err_alloc;
 	}
 
-	if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
-		err = -EBUSY;
-		goto err_context;
-	}
-
 	/*
 	 * Look up the group leader (we will attach this event to it):
 	 */
@@ -11039,6 +11040,18 @@ SYSCALL_DEFINE5(perf_event_open,
 				move_group = 0;
 			}
 		}
+
+		/*
+		 * Failure to create exclusive events returns -EBUSY.
+		 */
+		err = -EBUSY;
+		if (!exclusive_event_installable(group_leader, ctx))
+			goto err_locked;
+
+		for_each_sibling_event(sibling, group_leader) {
+			if (!exclusive_event_installable(sibling, ctx))
+				goto err_locked;
+		}
 	} else {
 		mutex_lock(&ctx->mutex);
 	}
@@ -11075,9 +11088,6 @@ SYSCALL_DEFINE5(perf_event_open,
 	 * because we need to serialize with concurrent event creation.
 	 */
 	if (!exclusive_event_installable(event, ctx)) {
-		/* exclusive and group stuff are assumed mutually exclusive */
-		WARN_ON_ONCE(move_group);
-
 		err = -EBUSY;
 		goto err_locked;
 	}