about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-06-03 08:01:36 -0400
committerIngo Molnar <mingo@elte.hu>2009-06-03 08:57:03 -0400
commita96bbc16418bc691317f265d6bf98ba941ca9c1a (patch)
tree136e40a12a94653e8224d6c09d5384b4140e0532 /kernel
parent8229289b607682f90b946ad2c319526303c17700 (diff)
perf_counter: Fix race in counter initialization
We need the PID namespace and counter ID available when the counter
overflows and we need to generate a sample event.

[ Impact: fix kernel crash with high-frequency sampling ]

Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
[ fixed a further crash and cleaned up the initialization a bit ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/perf_counter.c25
1 files changed, 14 insertions, 11 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 317cef78a388..ab4455447f84 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -48,6 +48,8 @@ int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
 int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
 
+static atomic64_t perf_counter_id;
+
 /*
  * Lock for (sysadmin-configurable) counter reservations:
  */
@@ -3351,14 +3353,18 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 
 	mutex_init(&counter->mmap_mutex);
 
 	counter->cpu = cpu;
 	counter->attr = *attr;
 	counter->group_leader = group_leader;
 	counter->pmu = NULL;
 	counter->ctx = ctx;
 	counter->oncpu = -1;
+
+	counter->ns = get_pid_ns(current->nsproxy->pid_ns);
+	counter->id = atomic64_inc_return(&perf_counter_id);
+
+	counter->state = PERF_COUNTER_STATE_INACTIVE;
 
-	counter->state = PERF_COUNTER_STATE_INACTIVE;
 	if (attr->disabled)
 		counter->state = PERF_COUNTER_STATE_OFF;
 
@@ -3402,6 +3408,8 @@ done:
 	err = PTR_ERR(pmu);
 
 	if (err) {
+		if (counter->ns)
+			put_pid_ns(counter->ns);
 		kfree(counter);
 		return ERR_PTR(err);
 	}
@@ -3419,8 +3427,6 @@ done:
 	return counter;
 }
 
-static atomic64_t perf_counter_id;
-
 /**
  * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
  *
@@ -3515,9 +3521,6 @@ SYSCALL_DEFINE5(perf_counter_open,
 	list_add_tail(&counter->owner_entry, &current->perf_counter_list);
 	mutex_unlock(&current->perf_counter_mutex);
 
-	counter->ns = get_pid_ns(current->nsproxy->pid_ns);
-	counter->id = atomic64_inc_return(&perf_counter_id);
-
 	fput_light(counter_file, fput_needed2);
 
 out_fput: