author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-08 12:52:22 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-08 14:36:58 -0400
commit		3df5edad87a998273aa5a9a8c728c05d855ad00e (patch)
tree		c8e8090b1fe518918ac85d92a172702e9544fa91 /kernel
parent		7fc23a5380797012e92a9633169440f2f4a21253 (diff)
perf_counter: rework ioctl()s
Corey noticed that ioctl()s on grouped counters didn't work on the
whole group. This extends the ioctl() interface to take a second
argument that is interpreted as a flags field. We then provide
PERF_IOC_FLAG_GROUP to toggle the behaviour.

Having this flag gives the greatest flexibility, allowing you to
individually enable/disable/reset counters in a group, or all together.

[ Impact: fix group counter enable/disable semantics ]

Reported-by: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20090508170028.837558214@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
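For context, this is how a userspace caller might drive the reworked
interface. A minimal sketch, assuming the fds come from earlier
sys_perf_counter_open() calls and that <linux/perf_counter.h> from this
kernel provides the PERF_COUNTER_IOC_* commands and PERF_IOC_FLAG_GROUP;
the helper names are hypothetical:

#include <sys/ioctl.h>
#include <linux/perf_counter.h>

/* Disable the whole group led by group_fd's counter in one call. */
static int group_disable(int group_fd)
{
	return ioctl(group_fd, PERF_COUNTER_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
}

/*
 * flags == 0: enable just this counter (and its inherited children),
 * leaving its group siblings untouched.
 */
static int counter_enable(int counter_fd)
{
	return ioctl(counter_fd, PERF_COUNTER_IOC_ENABLE, 0);
}

PERF_COUNTER_IOC_REFRESH is the exception: its argument is still a
refresh count rather than a flags field, which is why the reworked
perf_ioctl() below returns from that case before the flags check.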
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/perf_counter.c	104
1 file changed, 58 insertions, 46 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index fdb0d2421276..f4883f1f47eb 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -82,7 +82,7 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 	 * add it straight to the context's counter list, or to the group
 	 * leader's sibling list:
 	 */
-	if (counter->group_leader == counter)
+	if (group_leader == counter)
 		list_add_tail(&counter->list_entry, &ctx->counter_list);
 	else {
 		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
@@ -385,24 +385,6 @@ static void perf_counter_disable(struct perf_counter *counter)
 	spin_unlock_irq(&ctx->lock);
 }
 
-/*
- * Disable a counter and all its children.
- */
-static void perf_counter_disable_family(struct perf_counter *counter)
-{
-	struct perf_counter *child;
-
-	perf_counter_disable(counter);
-
-	/*
-	 * Lock the mutex to protect the list of children
-	 */
-	mutex_lock(&counter->mutex);
-	list_for_each_entry(child, &counter->child_list, child_list)
-		perf_counter_disable(child);
-	mutex_unlock(&counter->mutex);
-}
-
 static int
 counter_sched_in(struct perf_counter *counter,
 		 struct perf_cpu_context *cpuctx,
@@ -753,24 +735,6 @@ static int perf_counter_refresh(struct perf_counter *counter, int refresh)
 	return 0;
 }
 
-/*
- * Enable a counter and all its children.
- */
-static void perf_counter_enable_family(struct perf_counter *counter)
-{
-	struct perf_counter *child;
-
-	perf_counter_enable(counter);
-
-	/*
-	 * Lock the mutex to protect the list of children
-	 */
-	mutex_lock(&counter->mutex);
-	list_for_each_entry(child, &counter->child_list, child_list)
-		perf_counter_enable(child);
-	mutex_unlock(&counter->mutex);
-}
-
 void __perf_counter_sched_out(struct perf_counter_context *ctx,
 			      struct perf_cpu_context *cpuctx)
 {
@@ -1307,31 +1271,79 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 
 static void perf_counter_reset(struct perf_counter *counter)
 {
+	(void)perf_counter_read(counter);
 	atomic_set(&counter->count, 0);
+	perf_counter_update_userpage(counter);
+}
+
+static void perf_counter_for_each_sibling(struct perf_counter *counter,
+					  void (*func)(struct perf_counter *))
+{
+	struct perf_counter_context *ctx = counter->ctx;
+	struct perf_counter *sibling;
+
+	spin_lock_irq(&ctx->lock);
+	counter = counter->group_leader;
+
+	func(counter);
+	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+		func(sibling);
+	spin_unlock_irq(&ctx->lock);
+}
+
+static void perf_counter_for_each_child(struct perf_counter *counter,
+					void (*func)(struct perf_counter *))
+{
+	struct perf_counter *child;
+
+	mutex_lock(&counter->mutex);
+	func(counter);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		func(child);
+	mutex_unlock(&counter->mutex);
+}
+
+static void perf_counter_for_each(struct perf_counter *counter,
+				  void (*func)(struct perf_counter *))
+{
+	struct perf_counter *child;
+
+	mutex_lock(&counter->mutex);
+	perf_counter_for_each_sibling(counter, func);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		perf_counter_for_each_sibling(child, func);
+	mutex_unlock(&counter->mutex);
 }
 
 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct perf_counter *counter = file->private_data;
-	int err = 0;
+	void (*func)(struct perf_counter *);
+	u32 flags = arg;
 
 	switch (cmd) {
 	case PERF_COUNTER_IOC_ENABLE:
-		perf_counter_enable_family(counter);
+		func = perf_counter_enable;
 		break;
 	case PERF_COUNTER_IOC_DISABLE:
-		perf_counter_disable_family(counter);
-		break;
-	case PERF_COUNTER_IOC_REFRESH:
-		err = perf_counter_refresh(counter, arg);
+		func = perf_counter_disable;
 		break;
 	case PERF_COUNTER_IOC_RESET:
-		perf_counter_reset(counter);
+		func = perf_counter_reset;
 		break;
+
+	case PERF_COUNTER_IOC_REFRESH:
+		return perf_counter_refresh(counter, arg);
 	default:
-		err = -ENOTTY;
+		return -ENOTTY;
 	}
-	return err;
+
+	if (flags & PERF_IOC_FLAG_GROUP)
+		perf_counter_for_each(counter, func);
+	else
+		perf_counter_for_each_child(counter, func);
+
+	return 0;
 }
 
 /*