about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2012-10-02 09:38:52 -0400
committerIngo Molnar <mingo@kernel.org>2012-10-05 07:59:06 -0400
commit3f1f33206c16c7b3839d71372bc2ac3f305aa802 (patch)
tree683e2af07cb49e9a333f982e72088929027881c1 /kernel
parent2e132b12f78d88672711ae1d87624951de1089ca (diff)
perf: Clarify perf_cpu_context::active_pmu usage by renaming it to ::unique_pmu
Stephane thought the perf_cpu_context::active_pmu name confusing and suggested using 'unique_pmu' instead. This pointer is a pointer to a 'random' pmu sharing the cpuctx instance; therefore, by limiting a for_each_pmu loop to those where cpuctx->unique_pmu matches the pmu, we get a loop over unique cpuctx instances. Suggested-by: Stephane Eranian <eranian@google.com> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/n/tip-kxyjqpfj2fn9gt7kwu5ag9ks@git.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/events/core.c12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7b9df353ba1b..81939e8999a5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4419,7 +4419,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
4419 rcu_read_lock(); 4419 rcu_read_lock();
4420 list_for_each_entry_rcu(pmu, &pmus, entry) { 4420 list_for_each_entry_rcu(pmu, &pmus, entry) {
4421 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4421 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4422 if (cpuctx->active_pmu != pmu) 4422 if (cpuctx->unique_pmu != pmu)
4423 goto next; 4423 goto next;
4424 perf_event_task_ctx(&cpuctx->ctx, task_event); 4424 perf_event_task_ctx(&cpuctx->ctx, task_event);
4425 4425
@@ -4565,7 +4565,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
4565 rcu_read_lock(); 4565 rcu_read_lock();
4566 list_for_each_entry_rcu(pmu, &pmus, entry) { 4566 list_for_each_entry_rcu(pmu, &pmus, entry) {
4567 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4567 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4568 if (cpuctx->active_pmu != pmu) 4568 if (cpuctx->unique_pmu != pmu)
4569 goto next; 4569 goto next;
4570 perf_event_comm_ctx(&cpuctx->ctx, comm_event); 4570 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
4571 4571
@@ -4761,7 +4761,7 @@ got_name:
4761 rcu_read_lock(); 4761 rcu_read_lock();
4762 list_for_each_entry_rcu(pmu, &pmus, entry) { 4762 list_for_each_entry_rcu(pmu, &pmus, entry) {
4763 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4763 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4764 if (cpuctx->active_pmu != pmu) 4764 if (cpuctx->unique_pmu != pmu)
4765 goto next; 4765 goto next;
4766 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, 4766 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4767 vma->vm_flags & VM_EXEC); 4767 vma->vm_flags & VM_EXEC);
@@ -5862,8 +5862,8 @@ static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
5862 5862
5863 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); 5863 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5864 5864
5865 if (cpuctx->active_pmu == old_pmu) 5865 if (cpuctx->unique_pmu == old_pmu)
5866 cpuctx->active_pmu = pmu; 5866 cpuctx->unique_pmu = pmu;
5867 } 5867 }
5868} 5868}
5869 5869
@@ -5998,7 +5998,7 @@ skip_type:
5998 cpuctx->ctx.pmu = pmu; 5998 cpuctx->ctx.pmu = pmu;
5999 cpuctx->jiffies_interval = 1; 5999 cpuctx->jiffies_interval = 1;
6000 INIT_LIST_HEAD(&cpuctx->rotation_list); 6000 INIT_LIST_HEAD(&cpuctx->rotation_list);
6001 cpuctx->active_pmu = pmu; 6001 cpuctx->unique_pmu = pmu;
6002 } 6002 }
6003 6003
6004got_cpu_context: 6004got_cpu_context: