author     Ingo Molnar <mingo@elte.hu>  2011-12-06 00:42:35 -0500
committer  Ingo Molnar <mingo@elte.hu>  2011-12-06 00:43:49 -0500
commit     d6c1c49de577fa292af2449817364b7d89b574d8 (patch)
tree       e97cf88a318f50eb23c18d790e8b0bcf7bb47169 /kernel/events
parent     9dde9dc0a81c7aeb863b35121d09011f09b4897c (diff)
parent     ddf6e0e50723b62ac76ed18eb53e9417c6eefba7 (diff)
Merge branch 'perf/urgent' into perf/core
Merge reason: Add these cherry-picked commits so that future changes
on perf/core don't conflict.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c         | 89
-rw-r--r--  kernel/events/internal.h     |  3
-rw-r--r--  kernel/events/ring_buffer.c  |  3
3 files changed, 92 insertions(+), 3 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 924338bb489c..a355ffb0b28f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
+static void ring_buffer_attach(struct perf_event *event,
+                               struct ring_buffer *rb);
+
 void __weak perf_event_print_debug(void) { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2175,7 +2178,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 
         perf_event_sched_in(cpuctx, ctx, task);
 
-        cpuctx->task_ctx = ctx;
+        if (ctx->nr_events)
+                cpuctx->task_ctx = ctx;
 
         perf_pmu_enable(ctx->pmu);
         perf_ctx_unlock(cpuctx, ctx);
@@ -2983,12 +2987,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
         struct ring_buffer *rb;
         unsigned int events = POLL_HUP;
 
+        /*
+         * Race between perf_event_set_output() and perf_poll(): perf_poll()
+         * grabs the rb reference but perf_event_set_output() overrides it.
+         * Here is the timeline for two threads T1, T2:
+         * t0: T1, rb = rcu_dereference(event->rb)
+         * t1: T2, old_rb = event->rb
+         * t2: T2, event->rb = new rb
+         * t3: T2, ring_buffer_detach(old_rb)
+         * t4: T1, ring_buffer_attach(rb1)
+         * t5: T1, poll_wait(event->waitq)
+         *
+         * To avoid this problem, we grab mmap_mutex in perf_poll()
+         * thereby ensuring that the assignment of the new ring buffer
+         * and the detachment of the old buffer appear atomic to perf_poll()
+         */
+        mutex_lock(&event->mmap_mutex);
+
         rcu_read_lock();
         rb = rcu_dereference(event->rb);
-        if (rb)
+        if (rb) {
+                ring_buffer_attach(event, rb);
                 events = atomic_xchg(&rb->poll, 0);
+        }
         rcu_read_unlock();
 
+        mutex_unlock(&event->mmap_mutex);
+
         poll_wait(file, &event->waitq, wait);
 
         return events;
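
The mutex taken above is the heart of the fix: perf_poll() and perf_event_set_output() now both run under event->mmap_mutex, so the buffer swap (t1..t3) and the attach (t4) can no longer interleave. The following is a minimal userspace model of that serialization; the names are illustrative only, not kernel API:

        #include <pthread.h>
        #include <stdio.h>

        struct buf { int id; };

        struct event {
                struct buf      *rb;         /* current buffer, swapped by set_output() */
                pthread_mutex_t mmap_mutex;  /* serializes poll vs. swap, as in the fix */
        };

        /* perf_poll() analogue: reads rb and "attaches" under the mutex */
        static void poll_event(struct event *ev)
        {
                pthread_mutex_lock(&ev->mmap_mutex);
                struct buf *rb = ev->rb;                        /* t0 */
                if (rb)
                        printf("poll: attached to buf %d\n", rb->id);   /* t4 */
                pthread_mutex_unlock(&ev->mmap_mutex);
        }

        /* perf_event_set_output() analogue: swaps in a new buffer, detaches the old */
        static void set_output(struct event *ev, struct buf *new_rb)
        {
                pthread_mutex_lock(&ev->mmap_mutex);
                struct buf *old_rb = ev->rb;                    /* t1 */
                ev->rb = new_rb;                                /* t2 */
                if (old_rb)
                        printf("set_output: detached buf %d\n", old_rb->id); /* t3 */
                pthread_mutex_unlock(&ev->mmap_mutex);
        }

With both sides holding mmap_mutex, steps t0..t4 of one thread cannot be split by t1..t3 of the other, which is exactly the atomicity the comment asks for.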
@@ -3289,6 +3314,49 @@ unlock:
         return ret;
 }
 
+static void ring_buffer_attach(struct perf_event *event,
+                               struct ring_buffer *rb)
+{
+        unsigned long flags;
+
+        if (!list_empty(&event->rb_entry))
+                return;
+
+        spin_lock_irqsave(&rb->event_lock, flags);
+        if (!list_empty(&event->rb_entry))
+                goto unlock;
+
+        list_add(&event->rb_entry, &rb->event_list);
+unlock:
+        spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_detach(struct perf_event *event,
+                               struct ring_buffer *rb)
+{
+        unsigned long flags;
+
+        if (list_empty(&event->rb_entry))
+                return;
+
+        spin_lock_irqsave(&rb->event_lock, flags);
+        list_del_init(&event->rb_entry);
+        wake_up_all(&event->waitq);
+        spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_wakeup(struct perf_event *event)
+{
+        struct ring_buffer *rb;
+
+        rcu_read_lock();
+        rb = rcu_dereference(event->rb);
+        list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+                wake_up_all(&event->waitq);
+        }
+        rcu_read_unlock();
+}
+
 static void rb_free_rcu(struct rcu_head *rcu_head)
 {
         struct ring_buffer *rb;
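
ring_buffer_attach() above uses the classic check/lock/re-check idiom: the unlocked list_empty() test makes the common already-attached case free, and the second test under rb->event_lock catches a concurrent attacher. A stripped-down sketch of the same idiom, as illustrative userspace code rather than kernel API:

        #include <pthread.h>
        #include <stdbool.h>

        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        static bool attached;   /* stands in for !list_empty(&event->rb_entry) */

        static void attach_once(void)
        {
                if (attached)           /* unlocked fast path */
                        return;

                pthread_mutex_lock(&lock);
                if (!attached)          /* re-check: another thread may have won */
                        attached = true;        /* the list_add() step */
                pthread_mutex_unlock(&lock);
        }

Note also that ring_buffer_wakeup() deliberately reuses its event parameter as the list cursor: every event attached to the buffer gets its waitqueue woken, which is what lets one ring buffer deliver notifications for several redirected events.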
@@ -3314,9 +3382,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
+        struct perf_event *event, *n;
+        unsigned long flags;
+
         if (!atomic_dec_and_test(&rb->refcount))
                 return;
 
+        spin_lock_irqsave(&rb->event_lock, flags);
+        list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+                list_del_init(&event->rb_entry);
+                wake_up_all(&event->waitq);
+        }
+        spin_unlock_irqrestore(&rb->event_lock, flags);
+
         call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
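ring_buffer_put() pairs with the attach path: when the last reference drops, every still-attached event is unlinked and its waiters are woken before the buffer goes to call_rcu(), so nothing is left sleeping on a buffer about to be freed. A rough userspace analogue of that drop-last-ref-then-wake ordering (illustrative names, not the kernel API):

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdlib.h>

        struct rbuf {
                atomic_int      refcount;
                pthread_mutex_t lock;
                pthread_cond_t  waitq;  /* stands in for each event->waitq */
        };

        static void rbuf_put(struct rbuf *rb)
        {
                if (atomic_fetch_sub(&rb->refcount, 1) != 1)
                        return;                 /* not the last reference */

                pthread_mutex_lock(&rb->lock);
                pthread_cond_broadcast(&rb->waitq);     /* wake_up_all() analogue */
                pthread_mutex_unlock(&rb->lock);

                free(rb);       /* stands in for call_rcu(&rb->rcu_head, rb_free_rcu) */
        }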
@@ -3339,6 +3417,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
                 vma->vm_mm->pinned_vm -= event->mmap_locked;
                 rcu_assign_pointer(event->rb, NULL);
+                ring_buffer_detach(event, rb);
                 mutex_unlock(&event->mmap_mutex);
 
                 ring_buffer_put(rb);
@@ -3493,7 +3572,7 @@ static const struct file_operations perf_fops = {
 
 void perf_event_wakeup(struct perf_event *event)
 {
-        wake_up_all(&event->waitq);
+        ring_buffer_wakeup(event);
 
         if (event->pending_kill) {
                 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
@@ -5620,6 +5699,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
         INIT_LIST_HEAD(&event->group_entry);
         INIT_LIST_HEAD(&event->event_entry);
         INIT_LIST_HEAD(&event->sibling_list);
+        INIT_LIST_HEAD(&event->rb_entry);
+
         init_waitqueue_head(&event->waitq);
         init_irq_work(&event->pending, perf_pending_event);
 
@@ -5826,6 +5907,8 @@ set:
 
         old_rb = event->rb;
         rcu_assign_pointer(event->rb, rb);
+        if (old_rb)
+                ring_buffer_detach(event, old_rb);
         ret = 0;
 unlock:
         mutex_unlock(&event->mmap_mutex);
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index be4a43f6de4f..b0b107f90afc 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -26,6 +26,9 @@ struct ring_buffer {
         local_t                         lost;           /* nr records lost */
 
         long                            watermark;      /* wakeup watermark */
+        /* poll crap */
+        spinlock_t                      event_lock;
+        struct list_head                event_list;
 
         struct perf_event_mmap_page     *user_page;
         void                            *data_pages[0];
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index a2a29205cc0f..7f3011c6b57f 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
         rb->writable = 1;
 
         atomic_set(&rb->refcount, 1);
+
+        INIT_LIST_HEAD(&rb->event_list);
+        spin_lock_init(&rb->event_lock);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC