about summary refs log tree commit diff stats
path: root/kernel/perf_counter.c
diff options
context:
space:
mode:
author    Ingo Molnar <mingo@elte.hu>  2009-05-04 13:23:18 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-05-04 13:30:42 -0400
commit 1dce8d99b85aba6eddb8b8260baea944922e6fe7 (patch)
tree   77afc73743f8eb1c81b652418661b98b1f4b933d /kernel/perf_counter.c
parent 0d905bca23aca5c86a10ee101bcd3b1abbd40b25 (diff)
perf_counter: convert perf_resource_mutex to a spinlock
Now percpu counters can be initialized very early. But the init sequence uses mutex_lock(). Fortunately, perf_resource_mutex should be a spinlock anyway, so convert it.

[ Impact: fix crash due to early init mutex use ]

LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index fcdafa234a5d..5f86a1156c94 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -46,9 +46,9 @@ static atomic_t nr_comm_tracking __read_mostly;
46int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */ 46int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
47 47
48/* 48/*
49 * Mutex for (sysadmin-configurable) counter reservations: 49 * Lock for (sysadmin-configurable) counter reservations:
50 */ 50 */
51static DEFINE_MUTEX(perf_resource_mutex); 51static DEFINE_SPINLOCK(perf_resource_lock);
52 52
53/* 53/*
54 * Architecture provided APIs - weak aliases: 54 * Architecture provided APIs - weak aliases:
@@ -3207,9 +3207,9 @@ static void __cpuinit perf_counter_init_cpu(int cpu)
3207 cpuctx = &per_cpu(perf_cpu_context, cpu); 3207 cpuctx = &per_cpu(perf_cpu_context, cpu);
3208 __perf_counter_init_context(&cpuctx->ctx, NULL); 3208 __perf_counter_init_context(&cpuctx->ctx, NULL);
3209 3209
3210 mutex_lock(&perf_resource_mutex); 3210 spin_lock(&perf_resource_lock);
3211 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu; 3211 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
3212 mutex_unlock(&perf_resource_mutex); 3212 spin_unlock(&perf_resource_lock);
3213 3213
3214 hw_perf_counter_setup(cpu); 3214 hw_perf_counter_setup(cpu);
3215} 3215}
@@ -3292,7 +3292,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
3292 if (val > perf_max_counters) 3292 if (val > perf_max_counters)
3293 return -EINVAL; 3293 return -EINVAL;
3294 3294
3295 mutex_lock(&perf_resource_mutex); 3295 spin_lock(&perf_resource_lock);
3296 perf_reserved_percpu = val; 3296 perf_reserved_percpu = val;
3297 for_each_online_cpu(cpu) { 3297 for_each_online_cpu(cpu) {
3298 cpuctx = &per_cpu(perf_cpu_context, cpu); 3298 cpuctx = &per_cpu(perf_cpu_context, cpu);
@@ -3302,7 +3302,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
3302 cpuctx->max_pertask = mpt; 3302 cpuctx->max_pertask = mpt;
3303 spin_unlock_irq(&cpuctx->ctx.lock); 3303 spin_unlock_irq(&cpuctx->ctx.lock);
3304 } 3304 }
3305 mutex_unlock(&perf_resource_mutex); 3305 spin_unlock(&perf_resource_lock);
3306 3306
3307 return count; 3307 return count;
3308} 3308}
@@ -3324,9 +3324,9 @@ perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
3324 if (val > 1) 3324 if (val > 1)
3325 return -EINVAL; 3325 return -EINVAL;
3326 3326
3327 mutex_lock(&perf_resource_mutex); 3327 spin_lock(&perf_resource_lock);
3328 perf_overcommit = val; 3328 perf_overcommit = val;
3329 mutex_unlock(&perf_resource_mutex); 3329 spin_unlock(&perf_resource_lock);
3330 3330
3331 return count; 3331 return count;
3332} 3332}