Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	71
1 files changed, 35 insertions(+), 36 deletions(-)
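
This patch is a pure rename: the per-counter list member list_entry becomes group_entry, and the per-context list head counter_list becomes group_list, presumably to make it clearer that these link group leaders rather than individual counters. As a rough sketch of the fields involved (the real definitions live in the perf_counter header, which this diff does not touch, so the surrounding members shown here are assumptions):

struct perf_counter_context {
	/* ... */
	struct list_head	group_list;	/* was counter_list: one entry per group leader */
	struct list_head	event_list;	/* flat list of every counter; unchanged */
};

struct perf_counter {
	/* ... */
	struct list_head	group_entry;	/* was list_entry: links a leader into
						 * ctx->group_list, or a sibling into its
						 * leader's sibling_list */
	struct list_head	event_entry;	/* links into ctx->event_list; unchanged */
	struct list_head	sibling_list;	/* this group's non-leader members */
	struct perf_counter	*group_leader;
};
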
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index cc768ab81ac8..13ad73aed4ca 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -258,9 +258,9 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 	 * leader's sibling list:
 	 */
 	if (group_leader == counter)
-		list_add_tail(&counter->list_entry, &ctx->counter_list);
+		list_add_tail(&counter->group_entry, &ctx->group_list);
 	else {
-		list_add_tail(&counter->list_entry, &group_leader->sibling_list);
+		list_add_tail(&counter->group_entry, &group_leader->sibling_list);
 		group_leader->nr_siblings++;
 	}
 
@@ -279,13 +279,13 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 {
 	struct perf_counter *sibling, *tmp;
 
-	if (list_empty(&counter->list_entry))
+	if (list_empty(&counter->group_entry))
 		return;
 	ctx->nr_counters--;
 	if (counter->attr.inherit_stat)
 		ctx->nr_stat--;
 
-	list_del_init(&counter->list_entry);
+	list_del_init(&counter->group_entry);
 	list_del_rcu(&counter->event_entry);
 
 	if (counter->group_leader != counter)
@@ -296,10 +296,9 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 	 * upgrade the siblings to singleton counters by adding them
 	 * to the context list directly:
 	 */
-	list_for_each_entry_safe(sibling, tmp,
-				 &counter->sibling_list, list_entry) {
+	list_for_each_entry_safe(sibling, tmp, &counter->sibling_list, group_entry) {
 
-		list_move_tail(&sibling->list_entry, &ctx->counter_list);
+		list_move_tail(&sibling->group_entry, &ctx->group_list);
 		sibling->group_leader = sibling;
 	}
 }
@@ -343,7 +342,7 @@ group_sched_out(struct perf_counter *group_counter,
 	/*
 	 * Schedule out siblings (if any):
 	 */
-	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
+	list_for_each_entry(counter, &group_counter->sibling_list, group_entry)
 		counter_sched_out(counter, cpuctx, ctx);
 
 	if (group_counter->attr.exclusive)
@@ -435,7 +434,7 @@ retry:
 	/*
 	 * If the context is active we need to retry the smp call.
 	 */
-	if (ctx->nr_active && !list_empty(&counter->list_entry)) {
+	if (ctx->nr_active && !list_empty(&counter->group_entry)) {
 		spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
@@ -445,7 +444,7 @@ retry:
 	 * can remove the counter safely, if the call above did not
 	 * succeed.
 	 */
-	if (!list_empty(&counter->list_entry)) {
+	if (!list_empty(&counter->group_entry)) {
 		list_del_counter(counter, ctx);
 	}
 	spin_unlock_irq(&ctx->lock);
@@ -497,7 +496,7 @@ static void update_group_times(struct perf_counter *leader)
 	struct perf_counter *counter;
 
 	update_counter_times(leader);
-	list_for_each_entry(counter, &leader->sibling_list, list_entry)
+	list_for_each_entry(counter, &leader->sibling_list, group_entry)
 		update_counter_times(counter);
 }
 
@@ -643,7 +642,7 @@ group_sched_in(struct perf_counter *group_counter,
 	/*
 	 * Schedule in siblings as one group (if any):
 	 */
-	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+	list_for_each_entry(counter, &group_counter->sibling_list, group_entry) {
 		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
 			partial_group = counter;
 			goto group_error;
@@ -657,7 +656,7 @@ group_error:
 	 * Groups can be scheduled in as one unit only, so undo any
 	 * partial group before returning:
 	 */
-	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+	list_for_each_entry(counter, &group_counter->sibling_list, group_entry) {
 		if (counter == partial_group)
 			break;
 		counter_sched_out(counter, cpuctx, ctx);
@@ -678,7 +677,7 @@ static int is_software_only_group(struct perf_counter *leader)
 	if (!is_software_counter(leader))
 		return 0;
 
-	list_for_each_entry(counter, &leader->sibling_list, list_entry)
+	list_for_each_entry(counter, &leader->sibling_list, group_entry)
 		if (!is_software_counter(counter))
 			return 0;
 
@@ -842,7 +841,7 @@ retry:
 	/*
 	 * we need to retry the smp call.
 	 */
-	if (ctx->is_active && list_empty(&counter->list_entry)) {
+	if (ctx->is_active && list_empty(&counter->group_entry)) {
 		spin_unlock_irq(&ctx->lock);
 		goto retry;
 	}
@@ -852,7 +851,7 @@ retry:
 	 * can add the counter safely, if it the call above did not
 	 * succeed.
 	 */
-	if (list_empty(&counter->list_entry))
+	if (list_empty(&counter->group_entry))
 		add_counter_to_ctx(counter, ctx);
 	spin_unlock_irq(&ctx->lock);
 }
@@ -872,7 +871,7 @@ static void __perf_counter_mark_enabled(struct perf_counter *counter,
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
 	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
-	list_for_each_entry(sub, &counter->sibling_list, list_entry)
+	list_for_each_entry(sub, &counter->sibling_list, group_entry)
 		if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
 			sub->tstamp_enabled =
 				ctx->time - sub->total_time_enabled;
@@ -1032,7 +1031,7 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
 
 	perf_disable();
 	if (ctx->nr_active) {
-		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+		list_for_each_entry(counter, &ctx->group_list, group_entry) {
 			if (counter != counter->group_leader)
 				counter_sched_out(counter, cpuctx, ctx);
 			else
@@ -1252,7 +1251,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 	 * First go through the list and put on any pinned groups
 	 * in order to give them the best chance of going on.
 	 */
-	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+	list_for_each_entry(counter, &ctx->group_list, group_entry) {
 		if (counter->state <= PERF_COUNTER_STATE_OFF ||
 		    !counter->attr.pinned)
 			continue;
@@ -1276,7 +1275,7 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 		}
 	}
 
-	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+	list_for_each_entry(counter, &ctx->group_list, group_entry) {
 		/*
 		 * Ignore counters in OFF or ERROR state, and
 		 * ignore pinned counters since we did them already.
@@ -1369,7 +1368,7 @@ static void perf_ctx_adjust_freq(struct perf_counter_context *ctx)
 	u64 interrupts, freq;
 
 	spin_lock(&ctx->lock);
-	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+	list_for_each_entry(counter, &ctx->group_list, group_entry) {
 		if (counter->state != PERF_COUNTER_STATE_ACTIVE)
 			continue;
 
@@ -1441,8 +1440,8 @@ static void rotate_ctx(struct perf_counter_context *ctx)
 	 * Rotate the first entry last (works just fine for group counters too):
 	 */
 	perf_disable();
-	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		list_move_tail(&counter->list_entry, &ctx->counter_list);
+	list_for_each_entry(counter, &ctx->group_list, group_entry) {
+		list_move_tail(&counter->group_entry, &ctx->group_list);
 		break;
 	}
 	perf_enable();
@@ -1498,7 +1497,7 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
 
 	spin_lock(&ctx->lock);
 
-	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+	list_for_each_entry(counter, &ctx->group_list, group_entry) {
 		if (!counter->attr.enable_on_exec)
 			continue;
 		counter->attr.enable_on_exec = 0;
@@ -1575,7 +1574,7 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 	memset(ctx, 0, sizeof(*ctx));
 	spin_lock_init(&ctx->lock);
 	mutex_init(&ctx->mutex);
-	INIT_LIST_HEAD(&ctx->counter_list);
+	INIT_LIST_HEAD(&ctx->group_list);
 	INIT_LIST_HEAD(&ctx->event_list);
 	atomic_set(&ctx->refcount, 1);
 	ctx->task = task;
@@ -1818,7 +1817,7 @@ static int perf_counter_read_group(struct perf_counter *counter,
 
 	size += err;
 
-	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
 		err = perf_counter_read_entry(sub, read_format,
 				buf + size);
 		if (err < 0)
@@ -1948,7 +1947,7 @@ static void perf_counter_for_each(struct perf_counter *counter,
 
 	perf_counter_for_each_child(counter, func);
 	func(counter);
-	list_for_each_entry(sibling, &counter->sibling_list, list_entry)
+	list_for_each_entry(sibling, &counter->sibling_list, group_entry)
 		perf_counter_for_each_child(counter, func);
 	mutex_unlock(&ctx->mutex);
 }
@@ -2832,7 +2831,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 
 	perf_output_copy(handle, values, n * sizeof(u64));
 
-	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
 		n = 0;
 
 		if (sub != counter)
@@ -4118,7 +4117,7 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	mutex_init(&counter->child_mutex);
 	INIT_LIST_HEAD(&counter->child_list);
 
-	INIT_LIST_HEAD(&counter->list_entry);
+	INIT_LIST_HEAD(&counter->group_entry);
 	INIT_LIST_HEAD(&counter->event_entry);
 	INIT_LIST_HEAD(&counter->sibling_list);
 	init_waitqueue_head(&counter->waitq);
@@ -4544,7 +4543,7 @@ static int inherit_group(struct perf_counter *parent_counter,
 				 child, NULL, child_ctx);
 	if (IS_ERR(leader))
 		return PTR_ERR(leader);
-	list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
+	list_for_each_entry(sub, &parent_counter->sibling_list, group_entry) {
 		child_ctr = inherit_counter(sub, parent, parent_ctx,
 					    child, leader, child_ctx);
 		if (IS_ERR(child_ctr))
@@ -4670,8 +4669,8 @@ void perf_counter_exit_task(struct task_struct *child)
 	mutex_lock_nested(&child_ctx->mutex, SINGLE_DEPTH_NESTING);
 
 again:
-	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
-				 list_entry)
+	list_for_each_entry_safe(child_counter, tmp, &child_ctx->group_list,
+				 group_entry)
 		__perf_counter_exit_task(child_counter, child_ctx, child);
 
 	/*
@@ -4679,7 +4678,7 @@ again:
 	 * its siblings to the list, but we obtained 'tmp' before that which
 	 * will still point to the list head terminating the iteration.
 	 */
-	if (!list_empty(&child_ctx->counter_list))
+	if (!list_empty(&child_ctx->group_list))
 		goto again;
 
 	mutex_unlock(&child_ctx->mutex);
@@ -4701,7 +4700,7 @@ void perf_counter_free_task(struct task_struct *task)
 
 	mutex_lock(&ctx->mutex);
 again:
-	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
+	list_for_each_entry_safe(counter, tmp, &ctx->group_list, group_entry) {
 		struct perf_counter *parent = counter->parent;
 
 		if (WARN_ON_ONCE(!parent))
@@ -4717,7 +4716,7 @@ again:
 		free_counter(counter);
 	}
 
-	if (!list_empty(&ctx->counter_list))
+	if (!list_empty(&ctx->group_list))
 		goto again;
 
 	mutex_unlock(&ctx->mutex);
@@ -4847,7 +4846,7 @@ static void __perf_counter_exit_cpu(void *info)
 	struct perf_counter_context *ctx = &cpuctx->ctx;
 	struct perf_counter *counter, *tmp;
 
-	list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
+	list_for_each_entry_safe(counter, tmp, &ctx->group_list, group_entry)
 		__perf_counter_remove_from_context(counter);
 }
 static void perf_counter_exit_cpu(int cpu)
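
Worth noting from the hunks above: the renamed group_entry member does double duty. A group leader is linked through it into ctx->group_list, while a non-leader is linked through the same member into its leader's sibling_list, so walking every counter in a context takes two nested loops. A minimal sketch under those assumptions (the helper name is hypothetical and not part of the patch):

/* Hypothetical helper: visit every counter in a context, leaders first. */
static void for_each_counter_in_ctx(struct perf_counter_context *ctx,
				    void (*func)(struct perf_counter *counter))
{
	struct perf_counter *leader, *sibling;

	list_for_each_entry(leader, &ctx->group_list, group_entry) {
		func(leader);
		list_for_each_entry(sibling, &leader->sibling_list, group_entry)
			func(sibling);
	}
}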