about summary refs log tree commit diff stats
path: root/kernel/perf_event.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2010-12-16 05:22:25 -0500
committerIngo Molnar <mingo@elte.hu>2010-12-16 05:22:27 -0500
commit006b20fe4c69189b0d854e5eabf269e50ca86cdd (patch)
tree948b08825a36114c85ddc2bfcd965c261e32810f /kernel/perf_event.c
parent5f29805a4f4627e766f862ff9f10c14f5f314359 (diff)
parentd949750fed168b6553ca11ed19e4affd19d7a4d7 (diff)
Merge branch 'perf/urgent' into perf/core
Merge reason: We want to apply a dependent patch. Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--kernel/perf_event.c37
1 file changed, 30 insertions(+), 7 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index f9d2645b5546..a3d568fbacc6 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3922,6 +3922,8 @@ static void perf_event_task_event(struct perf_task_event *task_event)
3922 rcu_read_lock(); 3922 rcu_read_lock();
3923 list_for_each_entry_rcu(pmu, &pmus, entry) { 3923 list_for_each_entry_rcu(pmu, &pmus, entry) {
3924 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 3924 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
3925 if (cpuctx->active_pmu != pmu)
3926 goto next;
3925 perf_event_task_ctx(&cpuctx->ctx, task_event); 3927 perf_event_task_ctx(&cpuctx->ctx, task_event);
3926 3928
3927 ctx = task_event->task_ctx; 3929 ctx = task_event->task_ctx;
@@ -4066,6 +4068,8 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
4066 rcu_read_lock(); 4068 rcu_read_lock();
4067 list_for_each_entry_rcu(pmu, &pmus, entry) { 4069 list_for_each_entry_rcu(pmu, &pmus, entry) {
4068 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4070 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4071 if (cpuctx->active_pmu != pmu)
4072 goto next;
4069 perf_event_comm_ctx(&cpuctx->ctx, comm_event); 4073 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
4070 4074
4071 ctxn = pmu->task_ctx_nr; 4075 ctxn = pmu->task_ctx_nr;
@@ -4260,6 +4264,8 @@ got_name:
4260 rcu_read_lock(); 4264 rcu_read_lock();
4261 list_for_each_entry_rcu(pmu, &pmus, entry) { 4265 list_for_each_entry_rcu(pmu, &pmus, entry) {
4262 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4266 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4267 if (cpuctx->active_pmu != pmu)
4268 goto next;
4263 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, 4269 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4264 vma->vm_flags & VM_EXEC); 4270 vma->vm_flags & VM_EXEC);
4265 4271
@@ -4841,7 +4847,7 @@ static int perf_swevent_init(struct perf_event *event)
4841 break; 4847 break;
4842 } 4848 }
4843 4849
4844 if (event_id > PERF_COUNT_SW_MAX) 4850 if (event_id >= PERF_COUNT_SW_MAX)
4845 return -ENOENT; 4851 return -ENOENT;
4846 4852
4847 if (!event->parent) { 4853 if (!event->parent) {
@@ -5266,20 +5272,36 @@ static void *find_pmu_context(int ctxn)
5266 return NULL; 5272 return NULL;
5267} 5273}
5268 5274
5269static void free_pmu_context(void * __percpu cpu_context) 5275static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
5270{ 5276{
5271 struct pmu *pmu; 5277 int cpu;
5278
5279 for_each_possible_cpu(cpu) {
5280 struct perf_cpu_context *cpuctx;
5281
5282 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5283
5284 if (cpuctx->active_pmu == old_pmu)
5285 cpuctx->active_pmu = pmu;
5286 }
5287}
5288
5289static void free_pmu_context(struct pmu *pmu)
5290{
5291 struct pmu *i;
5272 5292
5273 mutex_lock(&pmus_lock); 5293 mutex_lock(&pmus_lock);
5274 /* 5294 /*
5275 * Like a real lame refcount. 5295 * Like a real lame refcount.
5276 */ 5296 */
5277 list_for_each_entry(pmu, &pmus, entry) { 5297 list_for_each_entry(i, &pmus, entry) {
5278 if (pmu->pmu_cpu_context == cpu_context) 5298 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
5299 update_pmu_context(i, pmu);
5279 goto out; 5300 goto out;
5301 }
5280 } 5302 }
5281 5303
5282 free_percpu(cpu_context); 5304 free_percpu(pmu->pmu_cpu_context);
5283out: 5305out:
5284 mutex_unlock(&pmus_lock); 5306 mutex_unlock(&pmus_lock);
5285} 5307}
@@ -5311,6 +5333,7 @@ int perf_pmu_register(struct pmu *pmu)
5311 cpuctx->ctx.pmu = pmu; 5333 cpuctx->ctx.pmu = pmu;
5312 cpuctx->jiffies_interval = 1; 5334 cpuctx->jiffies_interval = 1;
5313 INIT_LIST_HEAD(&cpuctx->rotation_list); 5335 INIT_LIST_HEAD(&cpuctx->rotation_list);
5336 cpuctx->active_pmu = pmu;
5314 } 5337 }
5315 5338
5316got_cpu_context: 5339got_cpu_context:
@@ -5362,7 +5385,7 @@ void perf_pmu_unregister(struct pmu *pmu)
5362 synchronize_rcu(); 5385 synchronize_rcu();
5363 5386
5364 free_percpu(pmu->pmu_disable_count); 5387 free_percpu(pmu->pmu_disable_count);
5365 free_pmu_context(pmu->pmu_cpu_context); 5388 free_pmu_context(pmu);
5366} 5389}
5367 5390
5368struct pmu *perf_init_event(struct perf_event *event) 5391struct pmu *perf_init_event(struct perf_event *event)