path: root/kernel/perf_event.c
author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-11-16 06:45:14 -0500
committer Ingo Molnar <mingo@elte.hu>              2009-11-16 07:27:45 -0500
commit    559fdc3c1b624edb1933a875022fe7e27934d11c (patch)
tree      b722805dbebd9584ffb981e2be84385ee0e22c67 /kernel/perf_event.c
parent    7255fe2a42c612f2b8fe4c347f0a5f0c97d85a46 (diff)
perf_event: Optimize perf_output_lock()
The purpose of perf_output_{un,}lock() is to:

 1) avoid publishing incomplete data
    [ possible when publishing a head that is ahead of an entry
      that is still being written ]

 2) guarantee fwd progress
    [ a simple refcount on pending writers doesn't need to drop to
      0, making it so would end up implementing something like forced
      quiescent states of RCU ]

To satisfy the above without undue complexity it serializes between
CPUs; this means that a pending writer can only be the same cpu in a
nested context, and since (under normal operation) a cpu always makes
progress we're good -- if the head is only published when the
bottom-most writer completes.

Now we don't need to disable IRQs in order to serialize between CPUs;
disabling preemption ought to be sufficient, esp. since we already
deal with nesting due to NMIs.

This avoids potentially expensive (and needless) local IRQ
disable/enable ops.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <1258373161.26714.254.camel@laptop>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
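[ Editor's note: the scheme the changelog describes -- a lock word holding
the owning CPU, taken with cmpxchg() and merely re-entered (never re-taken
or dropped) when the same CPU nests via NMI -- can be modelled in plain
C11 atomics. This is a minimal user-space sketch for illustration only,
not the kernel code; the names output_lock(), output_unlock() and
lock_word are invented here. ]

	/* Model of the nesting-aware cmpxchg lock; not kernel code. */
	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int lock_word = -1;	/* -1: unlocked, else owner id */

	/* Returns 1 iff this call actually took the lock (outermost writer). */
	static int output_lock(int cpu)
	{
		int cur;

		for (;;) {
			cur = -1;
			if (atomic_compare_exchange_strong(&lock_word, &cur, cpu))
				return 1;	/* lock was free: we took it */
			if (cur == cpu)
				return 0;	/* nested on the same cpu: fall through */
			/* owned by another cpu: spin until it releases */
		}
	}

	static void output_unlock(int locked)
	{
		if (locked)			/* only the outermost writer releases, */
			atomic_store(&lock_word, -1); /* which is when the head is published */
	}

	int main(void)
	{
		int outer = output_lock(0);	/* outermost writer on cpu 0 */
		int inner = output_lock(0);	/* e.g. an NMI nesting on cpu 0 */

		printf("outer locked=%d, inner locked=%d\n", outer, inner);
		output_unlock(inner);		/* no-op: not the lock owner */
		output_unlock(outer);		/* actual release */
		return 0;
	}

Only the outermost acquisition reports locked=1, so only its unlock
publishes the head; a nested writer on the same CPU falls through
immediately, which is what guarantees forward progress without a
writer refcount.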
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--  kernel/perf_event.c | 21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 6f4ed3b4cd73..3256e36ad251 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2674,20 +2674,21 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 static void perf_output_lock(struct perf_output_handle *handle)
 {
 	struct perf_mmap_data *data = handle->data;
-	int cpu;
+	int cur, cpu = get_cpu();
 
 	handle->locked = 0;
 
-	local_irq_save(handle->flags);
-	cpu = smp_processor_id();
-
-	if (in_nmi() && atomic_read(&data->lock) == cpu)
-		return;
+	for (;;) {
+		cur = atomic_cmpxchg(&data->lock, -1, cpu);
+		if (cur == -1) {
+			handle->locked = 1;
+			break;
+		}
+		if (cur == cpu)
+			break;
 
-	while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
 		cpu_relax();
-
-	handle->locked = 1;
+	}
 }
 
 static void perf_output_unlock(struct perf_output_handle *handle)
@@ -2733,7 +2734,7 @@ again:
 	if (atomic_xchg(&data->wakeup, 0))
 		perf_output_wakeup(handle);
 out:
-	local_irq_restore(handle->flags);
+	put_cpu();
 }
 
 void perf_output_copy(struct perf_output_handle *handle,
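
[ Editor's note: for readability, perf_output_lock() as it reads with
this patch applied, assembled from the first hunk above; the comments
are added here and are not in the source. ]

	static void perf_output_lock(struct perf_output_handle *handle)
	{
		struct perf_mmap_data *data = handle->data;
		int cur, cpu = get_cpu();	/* disables preemption until put_cpu() */

		handle->locked = 0;

		for (;;) {
			cur = atomic_cmpxchg(&data->lock, -1, cpu);
			if (cur == -1) {
				/* lock was free: we are the outermost writer */
				handle->locked = 1;
				break;
			}
			if (cur == cpu)
				/* we already hold it: nested (e.g. NMI) context */
				break;

			/* held by another cpu: spin until it releases */
			cpu_relax();
		}
	}

The matching put_cpu() in the second hunk replaces local_irq_restore(),
re-enabling preemption on the way out of perf_output_unlock().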