author:    Alexander Shishkin <alexander.shishkin@linux.intel.com>  2015-01-30 05:31:06 -0500
committer: Ingo Molnar <mingo@kernel.org>  2015-04-02 11:14:12 -0400
commit:    bed5b25ad9c8a2f5d735ef0bc746ec870c01c1b0 (patch)
tree:      de0dcc7cd008965e48e48644a37e3822453c1699 /kernel
parent:    6a279230391b63130070e0219b0ad09d34d28c89 (diff)
perf: Add a pmu capability for "exclusive" events
Usually, pmus that do, for example, instruction tracing, would only ever
be able to have one event per task per cpu (or per perf_event_context).
For such pmus it makes sense to disallow creating conflicting events
early on, so as to provide consistent behavior for the user.

This patch adds a pmu capability that indicates such constraint on event
creation.

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kaixu Xia <kaixu.xia@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Robert Richter <rric@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@infradead.org
Cc: adrian.hunter@intel.com
Cc: kan.liang@intel.com
Cc: markus.t.metzger@intel.com
Cc: mathieu.poirier@linaro.org
Link: http://lkml.kernel.org/r/1422613866-113186-1-git-send-email-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
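For context, a driver opts into this behavior by setting the capability bit on its struct pmu before registration. A minimal sketch, assuming a hypothetical instruction-tracing driver (the trace_pmu_demo name and the elided callbacks are illustrative, not part of this patch):

#include <linux/module.h>
#include <linux/perf_event.h>

static struct pmu trace_pmu_demo = {
	/* only one trace event can be scheduled per task/cpu at a time,
	 * so ask perf core to reject conflicting events at creation */
	.capabilities	= PERF_PMU_CAP_EXCLUSIVE,
	/* .event_init, .add, .del, .start, .stop, .read as usual */
};

static int __init trace_pmu_demo_init(void)
{
	/* perf_pmu_register() zeroes pmu->exclusive_cnt, as added by
	 * the perf_pmu_register() hunk below */
	return perf_pmu_register(&trace_pmu_demo, "trace_pmu_demo", -1);
}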
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c  119
1 file changed, 117 insertions, 2 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index da51128c337a..6d9fdaef7b57 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3459,6 +3459,91 @@ static void unaccount_event(struct perf_event *event)
 		unaccount_event_cpu(event, event->cpu);
 }
 
+/*
+ * The following implement mutual exclusion of events on "exclusive" pmus
+ * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
+ * at a time, so we disallow creating events that might conflict, namely:
+ *
+ * 1) cpu-wide events in the presence of per-task events,
+ * 2) per-task events in the presence of cpu-wide events,
+ * 3) two matching events on the same context.
+ *
+ * The former two cases are handled in the allocation path (perf_event_alloc(),
+ * __free_event()), the latter -- before the first perf_install_in_context().
+ */
+static int exclusive_event_init(struct perf_event *event)
+{
+	struct pmu *pmu = event->pmu;
+
+	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+		return 0;
+
+	/*
+	 * Prevent co-existence of per-task and cpu-wide events on the
+	 * same exclusive pmu.
+	 *
+	 * Negative pmu::exclusive_cnt means there are cpu-wide
+	 * events on this "exclusive" pmu, positive means there are
+	 * per-task events.
+	 *
+	 * Since this is called in perf_event_alloc() path, event::ctx
+	 * doesn't exist yet; it is, however, safe to use PERF_ATTACH_TASK
+	 * to mean "per-task event", because unlike other attach states it
+	 * never gets cleared.
+	 */
+	if (event->attach_state & PERF_ATTACH_TASK) {
+		if (!atomic_inc_unless_negative(&pmu->exclusive_cnt))
+			return -EBUSY;
+	} else {
+		if (!atomic_dec_unless_positive(&pmu->exclusive_cnt))
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void exclusive_event_destroy(struct perf_event *event)
+{
+	struct pmu *pmu = event->pmu;
+
+	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+		return;
+
+	/* see comment in exclusive_event_init() */
+	if (event->attach_state & PERF_ATTACH_TASK)
+		atomic_dec(&pmu->exclusive_cnt);
+	else
+		atomic_inc(&pmu->exclusive_cnt);
+}
+
+static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
+{
+	if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) &&
+	    (e1->cpu == e2->cpu ||
+	     e1->cpu == -1 ||
+	     e2->cpu == -1))
+		return true;
+	return false;
+}
+
+/* Called under the same ctx::mutex as perf_install_in_context() */
+static bool exclusive_event_installable(struct perf_event *event,
+					struct perf_event_context *ctx)
+{
+	struct perf_event *iter_event;
+	struct pmu *pmu = event->pmu;
+
+	if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+		return true;
+
+	list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
+		if (exclusive_event_match(iter_event, event))
+			return false;
+	}
+
+	return true;
+}
+
 static void __free_event(struct perf_event *event)
 {
 	if (!event->parent) {
@@ -3472,8 +3557,10 @@ static void __free_event(struct perf_event *event)
 	if (event->ctx)
 		put_ctx(event->ctx);
 
-	if (event->pmu)
+	if (event->pmu) {
+		exclusive_event_destroy(event);
 		module_put(event->pmu->module);
+	}
 
 	call_rcu(&event->rcu_head, free_event_rcu);
 }
@@ -7150,6 +7237,7 @@ got_cpu_context:
 	pmu->event_idx = perf_event_idx_default;
 
 	list_add_rcu(&pmu->entry, &pmus);
+	atomic_set(&pmu->exclusive_cnt, 0);
 	ret = 0;
 unlock:
 	mutex_unlock(&pmus_lock);
@@ -7405,16 +7493,23 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		goto err_ns;
 	}
 
+	err = exclusive_event_init(event);
+	if (err)
+		goto err_pmu;
+
 	if (!event->parent) {
 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
 			err = get_callchain_buffers();
 			if (err)
-				goto err_pmu;
+				goto err_per_task;
 		}
 	}
 
 	return event;
 
+err_per_task:
+	exclusive_event_destroy(event);
+
 err_pmu:
 	if (event->destroy)
 		event->destroy(event);
@@ -7819,6 +7914,11 @@ SYSCALL_DEFINE5(perf_event_open,
 		goto err_alloc;
 	}
 
+	if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
+		err = -EBUSY;
+		goto err_context;
+	}
+
 	if (task) {
 		put_task_struct(task);
 		task = NULL;
@@ -7941,6 +8041,13 @@ SYSCALL_DEFINE5(perf_event_open,
 		get_ctx(ctx);
 	}
 
+	if (!exclusive_event_installable(event, ctx)) {
+		err = -EBUSY;
+		mutex_unlock(&ctx->mutex);
+		fput(event_file);
+		goto err_context;
+	}
+
 	perf_install_in_context(ctx, event, event->cpu);
 	perf_unpin_context(ctx);
 
@@ -8032,6 +8139,14 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 
 	WARN_ON_ONCE(ctx->parent_ctx);
 	mutex_lock(&ctx->mutex);
+	if (!exclusive_event_installable(event, ctx)) {
+		mutex_unlock(&ctx->mutex);
+		perf_unpin_context(ctx);
+		put_ctx(ctx);
+		err = -EBUSY;
+		goto err_free;
+	}
+
 	perf_install_in_context(ctx, event, cpu);
 	perf_unpin_context(ctx);
 	mutex_unlock(&ctx->mutex);
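To make the counting scheme in exclusive_event_init()/exclusive_event_destroy() easier to follow, here is a standalone sketch of the same idea, with hypothetical demo_* names rather than the patch's own: a single atomic_t acts as a signed reference count, positive while per-task users hold it and negative while cpu-wide users do, so each class excludes the other without taking a lock.

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>

static atomic_t demo_exclusive_cnt = ATOMIC_INIT(0);

static int demo_get(bool per_task)
{
	if (per_task) {
		/* fails (returns 0) once cpu-wide users drove the count negative */
		if (!atomic_inc_unless_negative(&demo_exclusive_cnt))
			return -EBUSY;
	} else {
		/* fails once per-task users drove the count positive */
		if (!atomic_dec_unless_positive(&demo_exclusive_cnt))
			return -EBUSY;
	}
	return 0;
}

static void demo_put(bool per_task)
{
	/* undo the direction taken in demo_get() */
	if (per_task)
		atomic_dec(&demo_exclusive_cnt);
	else
		atomic_inc(&demo_exclusive_cnt);
}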