path: root/kernel/perf_event.c
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-09-13 05:06:55 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-09-13 11:08:42 -0400
commit	cde8e88498c8de69271fcb6d4dd974979368fa67 (patch)
tree	34eb745cc6f3e5a5dbe0918f4920424be70bcfff /kernel/perf_event.c
parent	b0b2072df3b544f56b90173c2cde7a374c51546b (diff)
perf: Sanitize the RCU logic
Simplify things and simply synchronize against two RCU variants for
PMU unregister -- we don't care about performance, it's module unload
if anything.

Reported-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
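For orientation before the diff: a minimal sketch of the pairing this patch establishes -- readers walk the pmu list under plain RCU, and the unregister path waits out both reader flavours before freeing. This is an illustrative approximation, not the in-tree code verbatim; walk_pmus() is a hypothetical stand-in for the real readers (perf_event_task_event() and friends), and the list_del_rcu() unpublish step is assumed from context rather than shown in the hunks below.

	/* Reader side: after this patch, plain RCU protects the pmu-list walk. */
	static void walk_pmus(void)		/* hypothetical stand-in */
	{
		struct pmu *pmu;

		rcu_read_lock();
		list_for_each_entry_rcu(pmu, &pmus, entry) {
			/* pmu cannot be freed while this read-side section runs */
		}
		rcu_read_unlock();
	}

	/* Updater side: pair each reader flavour with a matching grace period. */
	void perf_pmu_unregister(struct pmu *pmu)
	{
		mutex_lock(&pmus_lock);
		list_del_rcu(&pmu->entry);	/* assumed unpublish step */
		mutex_unlock(&pmus_lock);

		synchronize_srcu(&pmus_srcu);	/* waits for SRCU readers */
		synchronize_rcu();		/* waits for rcu_read_lock() readers */

		/* now safe to free per-pmu state */
	}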
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index f29b52576ec1..bc46bff69620 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3810,7 +3810,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	struct pmu *pmu;
 	int ctxn;
 
-	rcu_read_lock_sched();
+	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 		perf_event_task_ctx(&cpuctx->ctx, task_event);
@@ -3825,7 +3825,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 		if (ctx)
 			perf_event_task_ctx(ctx, task_event);
 	}
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 }
 
 static void perf_event_task(struct task_struct *task,
@@ -3943,7 +3943,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 
 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
 
-	rcu_read_lock_sched();
+	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
@@ -3956,7 +3956,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 		if (ctx)
 			perf_event_comm_ctx(ctx, comm_event);
 	}
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 }
 
 void perf_event_comm(struct task_struct *task)
@@ -4126,7 +4126,7 @@ got_name:
 
 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
 
-	rcu_read_lock_sched();
+	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
@@ -4142,7 +4142,7 @@ got_name:
 					vma->vm_flags & VM_EXEC);
 		}
 	}
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 
 	kfree(buf);
 }
@@ -5218,10 +5218,11 @@ void perf_pmu_unregister(struct pmu *pmu)
 	mutex_unlock(&pmus_lock);
 
 	/*
-	 * We use the pmu list either under SRCU or preempt_disable,
-	 * synchronize_srcu() implies synchronize_sched() so we're good.
+	 * We dereference the pmu list under both SRCU and regular RCU, so
+	 * synchronize against both of those.
 	 */
 	synchronize_srcu(&pmus_srcu);
+	synchronize_rcu();
 
 	free_percpu(pmu->pmu_disable_count);
 	free_pmu_context(pmu->pmu_cpu_context);
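Design note on the comment change in the last hunk: the old comment leaned on synchronize_srcu() implying synchronize_sched(), which SRCU does not guarantee, while the readers were using rcu_read_lock_sched(). Converting the readers to plain rcu_read_lock() and adding an explicit synchronize_rcu() leaves every reader flavour visibly paired with a matching grace period. The cost is one extra grace period on unregister, which only runs on module unload and, as the changelog notes, is not performance sensitive.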