author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-05-23 12:28:56 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-05-23 13:37:45 -0400
commit    fccc714b3148ab9741fafc1e90c3876d50df6093 (patch)
tree      e536e75faf15f4db8ec653e7303ad9f41cc5c186 /kernel/perf_counter.c
parent    e220d2dcb944c5c488b6855d15ec66d76900514f (diff)
perf_counter: Sanitize counter->mutex
s/counter->mutex/counter->child_mutex/ and make sure it's only used to protect the child_list.

The usage in __perf_counter_exit_task() doesn't appear to be problematic, since ctx->mutex also covers anything related to fd tear-down.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090523163012.533186528@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
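To make the narrowed locking rule concrete, here is a minimal user-space sketch of it in pthreads: child_mutex guards the child_list and nothing else. All struct and helper names here are hypothetical scaffolding for illustration, not the kernel's definitions.

    #include <pthread.h>

    /* Hypothetical stand-ins for the kernel types. */
    struct list_head { struct list_head *next, *prev; };

    struct counter_sketch {
    	pthread_mutex_t  child_mutex;	/* protects child_list ONLY */
    	struct list_head child_list;	/* counters inherited by children */
    	struct list_head child_entry;	/* our link in the parent's list */
    };

    static void list_init(struct list_head *h)
    {
    	h->next = h->prev = h;
    }

    static void list_add_tail_sk(struct list_head *n, struct list_head *h)
    {
    	n->prev = h->prev;
    	n->next = h;
    	h->prev->next = n;
    	h->prev = n;
    }

    /* cf. perf_counter_alloc(): the mutex and the list it protects are
     * now initialized together. */
    static void counter_init(struct counter_sketch *c)
    {
    	pthread_mutex_init(&c->child_mutex, NULL);
    	list_init(&c->child_list);
    	list_init(&c->child_entry);
    }

    /* cf. inherit_counter(): linking a child touches child_list, so it
     * takes child_mutex -- and nothing wider. */
    static void link_child(struct counter_sketch *parent,
    		       struct counter_sketch *child)
    {
    	pthread_mutex_lock(&parent->child_mutex);
    	list_add_tail_sk(&child->child_entry, &parent->child_list);
    	pthread_mutex_unlock(&parent->child_mutex);
    }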
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 47 +++++++++++++++++++++----------------------------
 1 file changed, 19 insertions(+), 28 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 2f410ea2cb39..679c3b5bb7d4 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -111,6 +111,10 @@ static void put_ctx(struct perf_counter_context *ctx)
 	}
 }
 
+/*
+ * Add a counter from the lists for its context.
+ * Must be called with ctx->mutex and ctx->lock held.
+ */
 static void
 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 {
@@ -136,7 +140,7 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 
 /*
  * Remove a counter from the lists for its context.
- * Must be called with counter->mutex and ctx->mutex held.
+ * Must be called with ctx->mutex and ctx->lock held.
  */
 static void
 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
@@ -276,7 +280,7 @@ static void __perf_counter_remove_from_context(void *info)
 /*
  * Remove the counter from a task's (or a CPU's) list of counters.
  *
- * Must be called with counter->mutex and ctx->mutex held.
+ * Must be called with ctx->mutex held.
  *
  * CPU counters are removed with a smp call. For task counters we only
  * call when the task is on a CPU.
@@ -1407,11 +1411,7 @@ static int perf_release(struct inode *inode, struct file *file)
 	file->private_data = NULL;
 
 	mutex_lock(&ctx->mutex);
-	mutex_lock(&counter->mutex);
-
 	perf_counter_remove_from_context(counter);
-
-	mutex_unlock(&counter->mutex);
 	mutex_unlock(&ctx->mutex);
 
 	free_counter(counter);
@@ -1437,7 +1437,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	if (counter->state == PERF_COUNTER_STATE_ERROR)
 		return 0;
 
-	mutex_lock(&counter->mutex);
+	mutex_lock(&counter->child_mutex);
 	values[0] = perf_counter_read(counter);
 	n = 1;
 	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
@@ -1446,7 +1446,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 		values[n++] = counter->total_time_running +
 			atomic64_read(&counter->child_total_time_running);
-	mutex_unlock(&counter->mutex);
+	mutex_unlock(&counter->child_mutex);
 
 	if (count < n * sizeof(u64))
 		return -EINVAL;
@@ -1510,11 +1510,11 @@ static void perf_counter_for_each_child(struct perf_counter *counter,
 {
 	struct perf_counter *child;
 
-	mutex_lock(&counter->mutex);
+	mutex_lock(&counter->child_mutex);
 	func(counter);
 	list_for_each_entry(child, &counter->child_list, child_list)
 		func(child);
-	mutex_unlock(&counter->mutex);
+	mutex_unlock(&counter->child_mutex);
 }
 
 static void perf_counter_for_each(struct perf_counter *counter,
@@ -1522,11 +1522,11 @@ static void perf_counter_for_each(struct perf_counter *counter,
 {
 	struct perf_counter *child;
 
-	mutex_lock(&counter->mutex);
+	mutex_lock(&counter->child_mutex);
 	perf_counter_for_each_sibling(counter, func);
 	list_for_each_entry(child, &counter->child_list, child_list)
 		perf_counter_for_each_sibling(child, func);
-	mutex_unlock(&counter->mutex);
+	mutex_unlock(&counter->child_mutex);
 }
 
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -3106,7 +3106,9 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 	if (!group_leader)
 		group_leader = counter;
 
-	mutex_init(&counter->mutex);
+	mutex_init(&counter->child_mutex);
+	INIT_LIST_HEAD(&counter->child_list);
+
 	INIT_LIST_HEAD(&counter->list_entry);
 	INIT_LIST_HEAD(&counter->event_entry);
 	INIT_LIST_HEAD(&counter->sibling_list);
@@ -3114,8 +3116,6 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 
 	mutex_init(&counter->mmap_mutex);
 
-	INIT_LIST_HEAD(&counter->child_list);
-
 	counter->cpu = cpu;
 	counter->hw_event = *hw_event;
 	counter->group_leader = group_leader;
@@ -3346,10 +3346,9 @@ inherit_counter(struct perf_counter *parent_counter,
 	/*
 	 * Link this into the parent counter's child list
 	 */
-	mutex_lock(&parent_counter->mutex);
+	mutex_lock(&parent_counter->child_mutex);
 	list_add_tail(&child_counter->child_list, &parent_counter->child_list);
-
-	mutex_unlock(&parent_counter->mutex);
+	mutex_unlock(&parent_counter->child_mutex);
 
 	return child_counter;
 }
@@ -3396,9 +3395,9 @@ static void sync_child_counter(struct perf_counter *child_counter,
 	/*
 	 * Remove this counter from the parent's list
 	 */
-	mutex_lock(&parent_counter->mutex);
+	mutex_lock(&parent_counter->child_mutex);
 	list_del_init(&child_counter->child_list);
-	mutex_unlock(&parent_counter->mutex);
+	mutex_unlock(&parent_counter->child_mutex);
 
 	/*
 	 * Release the parent counter, if this was the last
@@ -3414,17 +3413,9 @@ __perf_counter_exit_task(struct task_struct *child,
 {
 	struct perf_counter *parent_counter;
 
-	/*
-	 * Protect against concurrent operations on child_counter
-	 * due its fd getting closed, etc.
-	 */
-	mutex_lock(&child_counter->mutex);
-
 	update_counter_times(child_counter);
 	list_del_counter(child_counter, child_ctx);
 
-	mutex_unlock(&child_counter->mutex);
-
 	parent_counter = child_counter->parent;
 	/*
 	 * It can happen that parent exits first, and has counters
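On the point the changelog makes about __perf_counter_exit_task(): the condensed sketch below models why dropping the per-counter lock there is safe, under the changelog's stated assumption that ctx->mutex already covers fd tear-down. Both paths serialize on the same context mutex, so the removed counter->mutex added no protection. All names here are hypothetical placeholders, not the kernel's control flow verbatim.

    #include <pthread.h>

    /* Placeholder types; the real ones live in the kernel. */
    struct ctx_sketch  { pthread_mutex_t mutex; };
    struct ctr_sketch  { int on_ctx_list; };

    static void update_times(struct ctr_sketch *c)      { (void)c; }
    static void list_del_from_ctx(struct ctr_sketch *c) { c->on_ctx_list = 0; }

    /* fd close path (cf. perf_release()): removal runs under ctx->mutex. */
    static void release_path(struct ctx_sketch *ctx, struct ctr_sketch *c)
    {
    	pthread_mutex_lock(&ctx->mutex);
    	list_del_from_ctx(c);
    	pthread_mutex_unlock(&ctx->mutex);
    }

    /* task exit path (cf. __perf_counter_exit_task()): it serializes
     * against the close path on the same ctx->mutex, so taking a
     * per-counter mutex here as well was redundant. */
    static void exit_path(struct ctx_sketch *ctx, struct ctr_sketch *c)
    {
    	pthread_mutex_lock(&ctx->mutex);
    	update_times(c);
    	list_del_from_ctx(c);
    	pthread_mutex_unlock(&ctx->mutex);
    }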