author		Ingo Molnar <mingo@kernel.org>	2014-06-06 01:55:06 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-06-06 01:55:06 -0400
commit		ec00010972a0971b2c1da4fbe4e5c7d8ed1ecb05 (patch)
tree		c28975d7daf6d8a3aa23afe8f42837b71105b269 /kernel/events
parent		8c6e549a447c51f4f8c0ba7f1e444469f75a354a (diff)
parent		e041e328c4b41e1db79bfe5ba9992c2ed771ad19 (diff)
Merge branch 'perf/urgent' into perf/core, to resolve conflict and to prepare for new patches

Conflicts:
	arch/x86/kernel/traps.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c	153
1 file changed, 80 insertions(+), 73 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e9ef0c6646af..8fac2056d51e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2973,6 +2973,22 @@ out:
 	local_irq_restore(flags);
 }
 
+void perf_event_exec(void)
+{
+	struct perf_event_context *ctx;
+	int ctxn;
+
+	rcu_read_lock();
+	for_each_task_context_nr(ctxn) {
+		ctx = current->perf_event_ctxp[ctxn];
+		if (!ctx)
+			continue;
+
+		perf_event_enable_on_exec(ctx);
+	}
+	rcu_read_unlock();
+}
+
 /*
  * Cross CPU call to read the hardware event
  */
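
The new perf_event_exec() hook walks each of current's per-task contexts and calls perf_event_enable_on_exec() on it; the identical loop is removed from perf_event_comm() further down, so enable-on-exec handling now runs on the exec path instead of the comm path. From userspace this is what makes perf_event_attr.enable_on_exec take effect at execve(). A minimal sketch, not part of this patch (error handling trimmed, /bin/true assumed as the exec target, sys_perf_event_open is a local wrapper):

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int pipefd[2], fd;
	uint64_t count;
	pid_t child;
	char go;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;		/* created disabled ...             */
	attr.enable_on_exec = 1;	/* ... armed by the child's exec    */
	attr.exclude_kernel = 1;

	pipe(pipefd);
	child = fork();
	if (child == 0) {
		close(pipefd[1]);
		read(pipefd[0], &go, 1);	/* wait until the counter exists */
		execlp("/bin/true", "true", (char *)NULL);
		_exit(127);
	}

	close(pipefd[0]);
	fd = sys_perf_event_open(&attr, child, -1, -1, 0);
	write(pipefd[1], "x", 1);	/* child execs; enable_on_exec kicks in */
	waitpid(child, NULL, 0);

	read(fd, &count, sizeof(count));	/* plain read: just the count */
	printf("instructions counted after exec: %llu\n",
	       (unsigned long long)count);
	close(fd);
	return 0;
}
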
@@ -3195,7 +3211,8 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
+static void ring_buffer_attach(struct perf_event *event,
+			       struct ring_buffer *rb);
 
 static void unaccount_event_cpu(struct perf_event *event, int cpu)
 {
@@ -3259,8 +3276,6 @@ static void _free_event(struct perf_event *event)
 	unaccount_event(event);
 
 	if (event->rb) {
-		struct ring_buffer *rb;
-
 		/*
 		 * Can happen when we close an event with re-directed output.
 		 *
@@ -3268,12 +3283,7 @@ static void _free_event(struct perf_event *event)
 		 * over us; possibly making our ring_buffer_put() the last.
 		 */
 		mutex_lock(&event->mmap_mutex);
-		rb = event->rb;
-		if (rb) {
-			rcu_assign_pointer(event->rb, NULL);
-			ring_buffer_detach(event, rb);
-			ring_buffer_put(rb); /* could be last */
-		}
+		ring_buffer_attach(event, NULL);
 		mutex_unlock(&event->mmap_mutex);
 	}
 
@@ -3870,28 +3880,47 @@ unlock:
 static void ring_buffer_attach(struct perf_event *event,
 			       struct ring_buffer *rb)
 {
+	struct ring_buffer *old_rb = NULL;
 	unsigned long flags;
 
-	if (!list_empty(&event->rb_entry))
-		return;
+	if (event->rb) {
+		/*
+		 * Should be impossible, we set this when removing
+		 * event->rb_entry and wait/clear when adding event->rb_entry.
+		 */
+		WARN_ON_ONCE(event->rcu_pending);
 
-	spin_lock_irqsave(&rb->event_lock, flags);
-	if (list_empty(&event->rb_entry))
-		list_add(&event->rb_entry, &rb->event_list);
-	spin_unlock_irqrestore(&rb->event_lock, flags);
-}
+		old_rb = event->rb;
+		event->rcu_batches = get_state_synchronize_rcu();
+		event->rcu_pending = 1;
 
-static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
-{
-	unsigned long flags;
+		spin_lock_irqsave(&old_rb->event_lock, flags);
+		list_del_rcu(&event->rb_entry);
+		spin_unlock_irqrestore(&old_rb->event_lock, flags);
+	}
 
-	if (list_empty(&event->rb_entry))
-		return;
+	if (event->rcu_pending && rb) {
+		cond_synchronize_rcu(event->rcu_batches);
+		event->rcu_pending = 0;
+	}
+
+	if (rb) {
+		spin_lock_irqsave(&rb->event_lock, flags);
+		list_add_rcu(&event->rb_entry, &rb->event_list);
+		spin_unlock_irqrestore(&rb->event_lock, flags);
+	}
+
+	rcu_assign_pointer(event->rb, rb);
 
-	spin_lock_irqsave(&rb->event_lock, flags);
-	list_del_init(&event->rb_entry);
-	wake_up_all(&event->waitq);
-	spin_unlock_irqrestore(&rb->event_lock, flags);
+	if (old_rb) {
+		ring_buffer_put(old_rb);
+		/*
+		 * Since we detached before setting the new rb, so that we
+		 * could attach the new rb, we could have missed a wakeup.
+		 * Provide it now.
+		 */
+		wake_up_all(&event->waitq);
+	}
 }
 
 static void ring_buffer_wakeup(struct perf_event *event)
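
The rewritten ring_buffer_attach() takes over the old detach path as well (rb == NULL now means detach) and switches the rb_entry list to list_del_rcu()/list_add_rcu() with a polled grace period: get_state_synchronize_rcu() records a cookie when the entry is unlinked, and cond_synchronize_rcu() later blocks only if that grace period has not already elapsed on its own. A stripped-down sketch of the same pattern, not taken from this patch (the conn structure, lock and demo module are invented for illustration, and the cookie is taken after the removal here, which is the conservative ordering):

#include <linux/module.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct conn {
	struct list_head entry;
	unsigned long rcu_batches;	/* cookie from get_state_synchronize_rcu() */
	int rcu_pending;
};

static LIST_HEAD(conn_list);
static DEFINE_SPINLOCK(conn_lock);
static struct conn demo_conn;

/* Unlink without blocking; just remember which grace period we owe. */
static void conn_detach(struct conn *c)
{
	spin_lock(&conn_lock);
	list_del_rcu(&c->entry);
	spin_unlock(&conn_lock);

	c->rcu_batches = get_state_synchronize_rcu();
	c->rcu_pending = 1;
}

/* Re-link; block only if that grace period has not completed by itself. */
static void conn_attach(struct conn *c)
{
	if (c->rcu_pending) {
		cond_synchronize_rcu(c->rcu_batches);
		c->rcu_pending = 0;
	}

	spin_lock(&conn_lock);
	list_add_rcu(&c->entry, &conn_list);
	spin_unlock(&conn_lock);
}

static int __init cond_rcu_demo_init(void)
{
	conn_attach(&demo_conn);	/* first attach: nothing pending, no wait */
	conn_detach(&demo_conn);
	conn_attach(&demo_conn);	/* waits only if the grace period is still due */
	return 0;
}

static void __exit cond_rcu_demo_exit(void)
{
	conn_detach(&demo_conn);
	synchronize_rcu();		/* unconditional wait before the module goes away */
}

module_init(cond_rcu_demo_init);
module_exit(cond_rcu_demo_exit);
MODULE_LICENSE("GPL");

Compared with an unconditional synchronize_rcu() on every attach, the cookie makes the common case free: if the grace period has already expired by the time the entry is reused, cond_synchronize_rcu() returns immediately.
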
@@ -3960,7 +3989,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 {
 	struct perf_event *event = vma->vm_file->private_data;
 
-	struct ring_buffer *rb = event->rb;
+	struct ring_buffer *rb = ring_buffer_get(event);
 	struct user_struct *mmap_user = rb->mmap_user;
 	int mmap_locked = rb->mmap_locked;
 	unsigned long size = perf_data_size(rb);
@@ -3968,18 +3997,14 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	atomic_dec(&rb->mmap_count);
 
 	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
-		return;
+		goto out_put;
 
-	/* Detach current event from the buffer. */
-	rcu_assign_pointer(event->rb, NULL);
-	ring_buffer_detach(event, rb);
+	ring_buffer_attach(event, NULL);
 	mutex_unlock(&event->mmap_mutex);
 
 	/* If there's still other mmap()s of this buffer, we're done. */
-	if (atomic_read(&rb->mmap_count)) {
-		ring_buffer_put(rb); /* can't be last */
-		return;
-	}
+	if (atomic_read(&rb->mmap_count))
+		goto out_put;
 
 	/*
 	 * No other mmap()s, detach from all other events that might redirect
@@ -4009,11 +4034,9 @@ again:
 		 * still restart the iteration to make sure we're not now
 		 * iterating the wrong list.
 		 */
-		if (event->rb == rb) {
-			rcu_assign_pointer(event->rb, NULL);
-			ring_buffer_detach(event, rb);
-			ring_buffer_put(rb); /* can't be last, we still have one */
-		}
+		if (event->rb == rb)
+			ring_buffer_attach(event, NULL);
+
 		mutex_unlock(&event->mmap_mutex);
 		put_event(event);
 
@@ -4038,6 +4061,7 @@ again:
 	vma->vm_mm->pinned_vm -= mmap_locked;
 	free_uid(mmap_user);
 
+out_put:
 	ring_buffer_put(rb); /* could be last */
 }
 
@@ -4155,7 +4179,6 @@ again:
 	vma->vm_mm->pinned_vm += extra;
 
 	ring_buffer_attach(event, rb);
-	rcu_assign_pointer(event->rb, rb);
 
 	perf_event_init_userpage(event);
 	perf_event_update_userpage(event);
@@ -5070,18 +5093,6 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 void perf_event_comm(struct task_struct *task)
 {
 	struct perf_comm_event comm_event;
-	struct perf_event_context *ctx;
-	int ctxn;
-
-	rcu_read_lock();
-	for_each_task_context_nr(ctxn) {
-		ctx = task->perf_event_ctxp[ctxn];
-		if (!ctx)
-			continue;
-
-		perf_event_enable_on_exec(ctx);
-	}
-	rcu_read_unlock();
 
 	if (!atomic_read(&nr_comm_events))
 		return;
@@ -5439,6 +5450,9 @@ struct swevent_htable {
 
 	/* Recursion avoidance in each contexts */
 	int recursion[PERF_NR_CONTEXTS];
+
+	/* Keeps track of cpu being initialized/exited */
+	bool online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5685,8 +5699,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
 	hwc->state = !(flags & PERF_EF_START);
 
 	head = find_swevent_head(swhash, event);
-	if (WARN_ON_ONCE(!head))
+	if (!head) {
+		/*
+		 * We can race with cpu hotplug code. Do not
+		 * WARN if the cpu just got unplugged.
+		 */
+		WARN_ON_ONCE(swhash->online);
 		return -EINVAL;
+	}
 
 	hlist_add_head_rcu(&event->hlist_entry, head);
 
@@ -6956,7 +6976,7 @@ err_size:
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
-	struct ring_buffer *rb = NULL, *old_rb = NULL;
+	struct ring_buffer *rb = NULL;
 	int ret = -EINVAL;
 
 	if (!output_event)
@@ -6984,8 +7004,6 @@ set:
 	if (atomic_read(&event->mmap_count))
 		goto unlock;
 
-	old_rb = event->rb;
-
 	if (output_event) {
 		/* get the rb we want to redirect to */
 		rb = ring_buffer_get(output_event);
@@ -6993,23 +7011,7 @@ set:
 			goto unlock;
 	}
 
-	if (old_rb)
-		ring_buffer_detach(event, old_rb);
-
-	if (rb)
-		ring_buffer_attach(event, rb);
-
-	rcu_assign_pointer(event->rb, rb);
-
-	if (old_rb) {
-		ring_buffer_put(old_rb);
-		/*
-		 * Since we detached before setting the new rb, so that we
-		 * could attach the new rb, we could have missed a wakeup.
-		 * Provide it now.
-		 */
-		wake_up_all(&event->waitq);
-	}
+	ring_buffer_attach(event, rb);
 
 	ret = 0;
 unlock:
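
With ring_buffer_attach() now handling the old buffer, the new buffer and the deferred wakeup, perf_event_set_output() collapses to a single call. This is the function behind the PERF_EVENT_IOC_SET_OUTPUT ioctl, which redirects one event's records into another event's mmap'ed buffer. A userspace sketch, not part of this patch (error handling omitted; RB_PAGES and the sys_perf_event_open wrapper are local to the example):

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

#define RB_PAGES	8	/* data pages; must be a power of two */

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	size_t len = (RB_PAGES + 1) * sysconf(_SC_PAGESIZE);
	void *base;
	int fd1, fd2;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

	fd1 = sys_perf_event_open(&attr, 0, -1, -1, 0);	/* gets the buffer */
	fd2 = sys_perf_event_open(&attr, 0, -1, -1, 0);	/* will be redirected */

	/* Only fd1 is mmap'ed: one metadata page plus RB_PAGES data pages. */
	base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd1, 0);

	/* Route fd2's records into fd1's ring buffer. */
	ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1);

	/* ... both events now emit records behind the same
	 * struct perf_event_mmap_page at 'base' ... */

	munmap(base, len);
	close(fd2);
	close(fd1);
	return 0;
}
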
@@ -7060,6 +7062,9 @@ SYSCALL_DEFINE5(perf_event_open,
 	if (attr.freq) {
 		if (attr.sample_freq > sysctl_perf_event_sample_rate)
 			return -EINVAL;
+	} else {
+		if (attr.sample_period & (1ULL << 63))
+			return -EINVAL;
 	}
 
 	/*
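
The new else branch rejects a sample_period with bit 63 set when frequency mode is not in use; a period that large otherwise ends up looking negative to the signed arithmetic in the sampling code. A quick userspace check, not part of this patch (error handling trimmed), showing that such an open now fails with EINVAL:

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.sample_period = 1ULL << 63;	/* sign bit set: "negative" period */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0 && errno == EINVAL)
		printf("rejected: sample_period has bit 63 set\n");
	else if (fd >= 0)
		close(fd);
	return 0;
}
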
@@ -7872,6 +7877,7 @@ static void perf_event_init_cpu(int cpu)
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
+	swhash->online = true;
 	if (swhash->hlist_refcount > 0) {
 		struct swevent_hlist *hlist;
 
@@ -7929,6 +7935,7 @@ static void perf_event_exit_cpu(int cpu)
 	perf_event_exit_cpu_context(cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
+	swhash->online = false;
 	swevent_hlist_release(swhash);
 	mutex_unlock(&swhash->hlist_mutex);
 }