author	Ingo Molnar <mingo@elte.hu>	2010-05-20 08:38:55 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-05-20 08:38:55 -0400
commit	dfacc4d6c98b89609250269f518c1f54c30454ef (patch)
tree	e7effbee7bdc85d18f7b26ab9cb5c9f700d1481a /kernel/perf_event.c
parent	f869097e884d8cb65b2bb7831ca57b7dffb66fdd (diff)
parent	85cb68b27c428d477169f3aa46c72dba103a17bd (diff)
Merge branch 'perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into perf/core
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	58
1 file changed, 46 insertions(+), 12 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 7e3bcf1a29f0..2a060be3b07f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4050,19 +4050,46 @@ static inline u64 swevent_hash(u64 type, u32 event_id)
 	return hash_64(val, SWEVENT_HLIST_BITS);
 }
 
-static struct hlist_head *
-find_swevent_head(struct perf_cpu_context *ctx, u64 type, u32 event_id)
+static inline struct hlist_head *
+__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
 {
-	u64 hash;
-	struct swevent_hlist *hlist;
-
-	hash = swevent_hash(type, event_id);
+	u64 hash = swevent_hash(type, event_id);
+
+	return &hlist->heads[hash];
+}
+
+/* For the read side: events when they trigger */
+static inline struct hlist_head *
+find_swevent_head_rcu(struct perf_cpu_context *ctx, u64 type, u32 event_id)
+{
+	struct swevent_hlist *hlist;
 
 	hlist = rcu_dereference(ctx->swevent_hlist);
 	if (!hlist)
 		return NULL;
 
-	return &hlist->heads[hash];
+	return __find_swevent_head(hlist, type, event_id);
+}
+
+/* For the event head insertion and removal in the hlist */
+static inline struct hlist_head *
+find_swevent_head(struct perf_cpu_context *ctx, struct perf_event *event)
+{
+	struct swevent_hlist *hlist;
+	u32 event_id = event->attr.config;
+	u64 type = event->attr.type;
+
+	/*
+	 * Event scheduling is always serialized against hlist allocation
+	 * and release. Which makes the protected version suitable here.
+	 * The context lock guarantees that.
+	 */
+	hlist = rcu_dereference_protected(ctx->swevent_hlist,
+					  lockdep_is_held(&event->ctx->lock));
+	if (!hlist)
+		return NULL;
+
+	return __find_swevent_head(hlist, type, event_id);
 }
 
 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
@@ -4079,7 +4106,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 
 	rcu_read_lock();
 
-	head = find_swevent_head(cpuctx, type, event_id);
+	head = find_swevent_head_rcu(cpuctx, type, event_id);
 
 	if (!head)
 		goto end;
@@ -4162,7 +4189,7 @@ static int perf_swevent_enable(struct perf_event *event)
 		perf_swevent_set_period(event);
 	}
 
-	head = find_swevent_head(cpuctx, event->attr.type, event->attr.config);
+	head = find_swevent_head(cpuctx, event);
 	if (WARN_ON_ONCE(!head))
 		return -EINVAL;
 
@@ -4350,6 +4377,14 @@ static const struct pmu perf_ops_task_clock = {
 	.read		= task_clock_perf_event_read,
 };
 
+/* Deref the hlist from the update side */
+static inline struct swevent_hlist *
+swevent_hlist_deref(struct perf_cpu_context *cpuctx)
+{
+	return rcu_dereference_protected(cpuctx->swevent_hlist,
+					 lockdep_is_held(&cpuctx->hlist_mutex));
+}
+
 static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
 {
 	struct swevent_hlist *hlist;
@@ -4360,12 +4395,11 @@ static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
 
 static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
 {
-	struct swevent_hlist *hlist;
+	struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
 
-	if (!cpuctx->swevent_hlist)
+	if (!hlist)
 		return;
 
-	hlist = cpuctx->swevent_hlist;
 	rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
 	call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
 }
@@ -4402,7 +4436,7 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
 
 	mutex_lock(&cpuctx->hlist_mutex);
 
-	if (!cpuctx->swevent_hlist && cpu_online(cpu)) {
+	if (!swevent_hlist_deref(cpuctx) && cpu_online(cpu)) {
 		struct swevent_hlist *hlist;
 
 		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
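
The accessor split this merge brings in can be summarized as a standalone sketch (not part of the patch above): update-side code dereferences the RCU pointer with rcu_dereference_protected() plus a lockdep expression naming the lock that serializes updates, while read-side code keeps plain rcu_dereference() under rcu_read_lock(). All names below (demo_table, demo_mutex, demo_*) are invented for illustration.

/*
 * Sketch of the pattern: protected deref on the update side,
 * plain RCU deref on the read side.  Illustrative names only.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_table {
	struct rcu_head rcu_head;
	int data;
};

static struct demo_table __rcu *demo_table;
static DEFINE_MUTEX(demo_mutex);	/* serializes allocation and release */

/* Update side: callers hold demo_mutex, so no RCU read lock is needed. */
static struct demo_table *demo_table_deref(void)
{
	return rcu_dereference_protected(demo_table,
					 lockdep_is_held(&demo_mutex));
}

/* Read side: may run concurrently with release, so use rcu_dereference(). */
static int demo_table_read(void)
{
	struct demo_table *t;
	int ret = -1;

	rcu_read_lock();
	t = rcu_dereference(demo_table);
	if (t)
		ret = t->data;
	rcu_read_unlock();

	return ret;
}

static void demo_table_free_rcu(struct rcu_head *rcu_head)
{
	kfree(container_of(rcu_head, struct demo_table, rcu_head));
}

/* Allocate and publish the table if absent, as swevent_hlist_get_cpu() does. */
static int demo_table_get(void)
{
	int err = 0;

	mutex_lock(&demo_mutex);
	if (!demo_table_deref()) {
		struct demo_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

		if (!t)
			err = -ENOMEM;
		else
			rcu_assign_pointer(demo_table, t);
	}
	mutex_unlock(&demo_mutex);

	return err;
}

/* Release: unpublish under the mutex, free after a grace period. */
static void demo_table_release(void)
{
	struct demo_table *t;

	mutex_lock(&demo_mutex);
	t = demo_table_deref();
	if (t) {
		rcu_assign_pointer(demo_table, NULL);
		call_rcu(&t->rcu_head, demo_table_free_rcu);
	}
	mutex_unlock(&demo_mutex);
}

This mirrors the patch's choice: swevent_hlist_deref() and find_swevent_head() take the protected path because hlist_mutex or the context lock is held, and only the event-trigger path (find_swevent_head_rcu) pays for an RCU read-side dereference.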