Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c  21
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index cda3ebd49e86..dbccf83c134d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -372,6 +372,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+		if (cpuctx->unique_pmu != pmu)
+			continue; /* ensure we process each cpuctx once */
 
 		/*
 		 * perf_cgroup_events says at least one
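Aside, not part of the patch: software PMUs can share a single per-CPU context, so walking the pmus list may visit the same cpuctx more than once; the unique_pmu test added above (and the active_pmu rename in the hunks below) marks the one PMU designated to handle a shared context. A minimal userspace sketch of that dedup pattern; fake_pmu, fake_cpuctx and switch_all are hypothetical names, not kernel API:

#include <stdio.h>

/* Hypothetical stand-ins for struct pmu / struct perf_cpu_context. */
struct fake_cpuctx {
        struct fake_pmu *unique_pmu;    /* the one PMU designated to handle this ctx */
        int switches;
};

struct fake_pmu {
        struct fake_cpuctx *cpuctx;     /* may be shared with other PMUs */
};

static void switch_all(struct fake_pmu **pmus, int n)
{
        for (int i = 0; i < n; i++) {
                struct fake_cpuctx *ctx = pmus[i]->cpuctx;

                if (ctx->unique_pmu != pmus[i])
                        continue;       /* ensure we process each cpuctx once */
                ctx->switches++;
        }
}

int main(void)
{
        struct fake_cpuctx shared = { 0 };
        struct fake_pmu sw1 = { &shared }, sw2 = { &shared };
        struct fake_pmu *pmus[] = { &sw1, &sw2 };

        shared.unique_pmu = &sw1;       /* sw1 owns the shared context */
        switch_all(pmus, 2);
        printf("shared cpuctx switched %d time(s)\n", shared.switches); /* 1, not 2 */
        return 0;
}

Without the unique_pmu check the shared context would be switched once per PMU pointing at it, which is exactly the double-switch this hunk prevents.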
@@ -395,9 +397,10 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 
 	if (mode & PERF_CGROUP_SWIN) {
 		WARN_ON_ONCE(cpuctx->cgrp);
-		/* set cgrp before ctxsw in to
-		 * allow event_filter_match() to not
-		 * have to pass task around
+		/*
+		 * set cgrp before ctxsw in to allow
+		 * event_filter_match() to not have to pass
+		 * task around
 		 */
 		cpuctx->cgrp = perf_cgroup_from_task(task);
 		cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
@@ -4412,7 +4415,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->active_pmu != pmu)
+		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_task_ctx(&cpuctx->ctx, task_event);
 
@@ -4558,7 +4561,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->active_pmu != pmu)
+		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 
@@ -4754,7 +4757,7 @@ got_name:
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->active_pmu != pmu)
+		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
 				    vma->vm_flags & VM_EXEC);
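Aside: the three hunks above are the same rename in the side-band event paths (task, comm, mmap). Note the shape of those loops: each iteration takes a per-CPU reference with get_cpu_ptr(), so a skipped PMU exits via goto next rather than continue, keeping the matching put_cpu_ptr() on every path. A hedged userspace model of that acquire/skip/release shape; ev_pmu, ev_ctx, deliver_all and the refs counter are illustrative stand-ins only, with refs loosely modelling the get/put pairing:

#include <stdio.h>

struct ev_ctx {
        struct ev_pmu *unique_pmu;
        int delivered;
        int refs;               /* models the get/put_cpu_ptr pairing */
};

struct ev_pmu {
        struct ev_ctx *ctx;     /* may be shared with other PMUs */
};

static struct ev_ctx *get_ctx(struct ev_pmu *p) { p->ctx->refs++; return p->ctx; }
static void put_ctx(struct ev_ctx *c) { c->refs--; }

static void deliver_all(struct ev_pmu **pmus, int n)
{
        for (int i = 0; i < n; i++) {
                struct ev_ctx *ctx = get_ctx(pmus[i]);

                if (ctx->unique_pmu != pmus[i])
                        goto next;      /* skip, but still drop the reference */
                ctx->delivered++;
next:
                put_ctx(ctx);
        }
}

int main(void)
{
        struct ev_ctx shared = { 0 };
        struct ev_pmu a = { &shared }, b = { &shared };
        struct ev_pmu *pmus[] = { &a, &b };

        shared.unique_pmu = &a;
        deliver_all(pmus, 2);
        printf("delivered=%d refs=%d\n", shared.delivered, shared.refs); /* 1 0 */
        return 0;
}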
@@ -5855,8 +5858,8 @@ static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
 
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 
-		if (cpuctx->active_pmu == old_pmu)
-			cpuctx->active_pmu = pmu;
+		if (cpuctx->unique_pmu == old_pmu)
+			cpuctx->unique_pmu = pmu;
 	}
 }
 
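Aside: this hunk is update_pmu_context() keeping the ownership invariant across PMU unregistration: any context owned by the departing PMU is handed to its replacement, while contexts owned by other PMUs are untouched. A self-contained sketch under that assumption; the toy_* names are hypothetical:

#include <assert.h>

/* Hypothetical model of the unique_pmu hand-off in update_pmu_context(). */
struct toy_pmu { int id; };

struct toy_cpuctx {
        struct toy_pmu *unique_pmu;
};

static void toy_update_pmu_context(struct toy_cpuctx *ctx,
                                   struct toy_pmu *new_pmu,
                                   struct toy_pmu *old_pmu)
{
        if (ctx->unique_pmu == old_pmu)
                ctx->unique_pmu = new_pmu;      /* hand ownership over */
}

int main(void)
{
        struct toy_pmu departing = { 1 }, successor = { 2 }, bystander = { 3 };
        struct toy_cpuctx owned = { &departing }, other = { &bystander };

        toy_update_pmu_context(&owned, &successor, &departing);
        toy_update_pmu_context(&other, &successor, &departing);

        assert(owned.unique_pmu == &successor); /* re-pointed to the successor */
        assert(other.unique_pmu == &bystander); /* untouched                   */
        return 0;
}

The final hunk below completes the picture: unique_pmu is seeded when a per-CPU context is first set up. All three sketches compile standalone with cc -std=c99.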
@@ -5991,7 +5994,7 @@ skip_type:
 		cpuctx->ctx.pmu = pmu;
 		cpuctx->jiffies_interval = 1;
 		INIT_LIST_HEAD(&cpuctx->rotation_list);
-		cpuctx->active_pmu = pmu;
+		cpuctx->unique_pmu = pmu;
 	}
 
 got_cpu_context: