-rw-r--r--  arch/powerpc/kernel/perf_event.c  10
-rw-r--r--  arch/sparc/kernel/perf_event.c    10
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c  18
-rw-r--r--  include/linux/perf_event.h         2
-rw-r--r--  kernel/perf_event.c               45
5 files changed, 38 insertions(+), 47 deletions(-)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 1eb85fbf53a5..b6cf8f1f4d35 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -718,10 +718,10 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static void event_sched_in(struct perf_event *event, int cpu)
+static void event_sched_in(struct perf_event *event)
 {
 	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = cpu;
+	event->oncpu = smp_processor_id();
 	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
 	if (is_software_event(event))
 		event->pmu->enable(event);
@@ -735,7 +735,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
  */
 int hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu)
+	       struct perf_event_context *ctx)
 {
 	struct cpu_hw_events *cpuhw;
 	long i, n, n0;
@@ -766,10 +766,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 	cpuctx->active_oncpu += n;
 	n = 1;
-	event_sched_in(group_leader, cpu);
+	event_sched_in(group_leader);
 	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
 		if (sub->state != PERF_EVENT_STATE_OFF) {
-			event_sched_in(sub, cpu);
+			event_sched_in(sub);
 			++n;
 		}
 	}
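
The change on all three architectures has the same shape: event_sched_in() only ever runs on the CPU the event is being scheduled onto, so the cpu parameter carried no extra information and can be derived locally. A minimal userspace sketch of the before/after (the struct and the smp_processor_id() stub are stand-ins for illustration, not kernel code):

/* Sketch only: stubbed types, not kernel code. */
#include <stdio.h>

#define PERF_EVENT_STATE_ACTIVE 1

struct perf_event {
	int state;
	int oncpu;
};

/* Stand-in for the kernel's smp_processor_id(). */
static int smp_processor_id(void)
{
	return 0;
}

/* Before: every caller had to thread 'cpu' through the call chain. */
static void event_sched_in_old(struct perf_event *event, int cpu)
{
	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = cpu;
}

/* After: the function asks for the CPU itself; one parameter fewer at
 * every call site, and no way for a caller to pass a stale value. */
static void event_sched_in_new(struct perf_event *event)
{
	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
}

int main(void)
{
	struct perf_event ev = { .state = 0, .oncpu = -1 };

	event_sched_in_old(&ev, 0);
	event_sched_in_new(&ev);
	printf("state=%d oncpu=%d\n", ev.state, ev.oncpu);
	return 0;
}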
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e856456ec02f..9f2b2bac8b2b 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -980,10 +980,10 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static void event_sched_in(struct perf_event *event, int cpu)
+static void event_sched_in(struct perf_event *event)
 {
 	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = cpu;
+	event->oncpu = smp_processor_id();
 	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
 	if (is_software_event(event))
 		event->pmu->enable(event);
@@ -991,7 +991,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
 
 int hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu)
+	       struct perf_event_context *ctx)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *sub;
@@ -1015,10 +1015,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
 
 	cpuctx->active_oncpu += n;
 	n = 1;
-	event_sched_in(group_leader, cpu);
+	event_sched_in(group_leader);
 	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
 		if (sub->state != PERF_EVENT_STATE_OFF) {
-			event_sched_in(sub, cpu);
+			event_sched_in(sub);
 			n++;
 		}
 	}
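
The sparc version already reads its per-CPU state with __get_cpu_var(cpu_hw_events), i.e. "this CPU's copy"; the x86 hunk below switches from per_cpu(cpu_hw_events, cpu) to the same form. A hedged userspace model of why the two are interchangeable here (an array stands in for real per-CPU data, and the macro names mimic, but are not, the kernel's):

/* Userspace model of per_cpu() vs __get_cpu_var(); not the kernel macros. */
#include <assert.h>

#define NR_CPUS 4

struct cpu_hw_events {
	int n_events;
};

static struct cpu_hw_events cpu_hw_events[NR_CPUS];
static int this_cpu;	/* models smp_processor_id() */

/* per_cpu(var, cpu): the named CPU's copy of the variable. */
#define per_cpu(var, cpu)	(var[cpu])
/* __get_cpu_var(var): always the *current* CPU's copy. */
#define __get_cpu_var(var)	(var[this_cpu])

int main(void)
{
	this_cpu = 2;

	/* hw_perf_group_sched_in() always runs on the CPU it is
	 * scheduling for, so 'cpu' was always the current CPU and the
	 * two lookups name the same object. */
	assert(&per_cpu(cpu_hw_events, this_cpu) ==
	       &__get_cpu_var(cpu_hw_events));
	return 0;
}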
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index aa12f36e4711..ad096562d694 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2403,12 +2403,12 @@ done:
 }
 
 static int x86_event_sched_in(struct perf_event *event,
-			  struct perf_cpu_context *cpuctx, int cpu)
+			  struct perf_cpu_context *cpuctx)
 {
 	int ret = 0;
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = cpu;
+	event->oncpu = smp_processor_id();
 	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
 
 	if (!is_x86_event(event))
@@ -2424,7 +2424,7 @@ static int x86_event_sched_in(struct perf_event *event,
 }
 
 static void x86_event_sched_out(struct perf_event *event,
-			    struct perf_cpu_context *cpuctx, int cpu)
+			    struct perf_cpu_context *cpuctx)
 {
 	event->state = PERF_EVENT_STATE_INACTIVE;
 	event->oncpu = -1;
@@ -2452,9 +2452,9 @@ static void x86_event_sched_out(struct perf_event *event,
  */
 int hw_perf_group_sched_in(struct perf_event *leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu)
+	       struct perf_event_context *ctx)
 {
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *sub;
 	int assign[X86_PMC_IDX_MAX];
 	int n0, n1, ret;
@@ -2468,14 +2468,14 @@ int hw_perf_group_sched_in(struct perf_event *leader,
 	if (ret)
 		return ret;
 
-	ret = x86_event_sched_in(leader, cpuctx, cpu);
+	ret = x86_event_sched_in(leader, cpuctx);
 	if (ret)
 		return ret;
 
 	n1 = 1;
 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
 		if (sub->state > PERF_EVENT_STATE_OFF) {
-			ret = x86_event_sched_in(sub, cpuctx, cpu);
+			ret = x86_event_sched_in(sub, cpuctx);
 			if (ret)
 				goto undo;
 			++n1;
@@ -2500,11 +2500,11 @@ int hw_perf_group_sched_in(struct perf_event *leader,
 	 */
 	return 1;
 undo:
-	x86_event_sched_out(leader, cpuctx, cpu);
+	x86_event_sched_out(leader, cpuctx);
 	n0 = 1;
 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
 		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
-			x86_event_sched_out(sub, cpuctx, cpu);
+			x86_event_sched_out(sub, cpuctx);
 			if (++n0 == n1)
 				break;
 		}
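
The undo: label shows the all-or-nothing contract of group scheduling: if any sibling fails to get a counter, every event already scheduled is torn down so the group is never left half-active. A standalone sketch of that rollback shape, with counter allocation reduced to a simple capacity check (all names hypothetical):

/* Standalone sketch of the group rollback; names are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

static int capacity = 2;	/* pretend the PMU has two free counters */
static int in_use;

static bool sched_in_one(int id)
{
	if (in_use >= capacity)
		return false;
	in_use++;
	printf("scheduled event %d\n", id);
	return true;
}

static void sched_out_one(int id)
{
	in_use--;
	printf("rolled back event %d\n", id);
}

/* Mirrors the shape of hw_perf_group_sched_in(): schedule the leader,
 * then each sibling; on the first failure, unwind exactly the events
 * that made it in. */
static bool group_sched_in(int leader, const int *subs, int nsubs)
{
	int i;

	if (!sched_in_one(leader))
		return false;
	for (i = 0; i < nsubs; i++) {
		if (!sched_in_one(subs[i]))
			goto undo;
	}
	return true;
undo:
	sched_out_one(leader);
	while (i-- > 0)
		sched_out_one(subs[i]);
	return false;
}

int main(void)
{
	int subs[] = { 11, 12 };

	/* Leader + two siblings need three counters but only two fit,
	 * so the whole group unwinds. */
	group_sched_in(10, subs, 2);
	return 0;
}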
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b08dfdad08cb..d0e072c5b58a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -772,7 +772,7 @@ extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern int hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu);
+	       struct perf_event_context *ctx);
 extern void perf_event_update_userpage(struct perf_event *event);
 extern int perf_event_release_kernel(struct perf_event *event);
 extern struct perf_event *
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index fb4e56eb58f4..05b6c6b825e3 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -103,7 +103,7 @@ void __weak hw_perf_event_setup_offline(int cpu) { barrier(); }
 int __weak
 hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu)
+	       struct perf_event_context *ctx)
 {
 	return 0;
 }
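
This weak definition is the generic fallback: returning 0 tells group_sched_in() that the architecture did not handle the group, so the generic path schedules each event itself. An architecture that provides a strong hw_perf_group_sched_in() (as powerpc, sparc and x86 do above) overrides it at link time. A minimal userspace model of the pattern (in the kernel, __weak expands to this same compiler attribute):

/* Userspace model of the __weak fallback. */
#include <stdio.h>

/* Generic default: 0 means "not handled, schedule the group the
 * generic way".  An architecture links in a strong definition of the
 * same symbol, and the linker picks the strong one. */
__attribute__((weak)) int hw_perf_group_sched_in(void)
{
	return 0;
}

int main(void)
{
	/* With no strong override in this program, the weak stub runs. */
	printf("arch handled group: %d\n", hw_perf_group_sched_in());
	return 0;
}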
@@ -633,14 +633,13 @@ void perf_event_disable(struct perf_event *event)
 static int
 event_sched_in(struct perf_event *event,
 		 struct perf_cpu_context *cpuctx,
-		 struct perf_event_context *ctx,
-		 int cpu)
+		 struct perf_event_context *ctx)
 {
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
+	event->oncpu = smp_processor_id();
 	/*
 	 * The new state must be visible before we turn it on in the hardware:
 	 */
@@ -667,8 +666,7 @@ event_sched_in(struct perf_event *event,
 static int
 group_sched_in(struct perf_event *group_event,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx,
-	       int cpu)
+	       struct perf_event_context *ctx)
 {
 	struct perf_event *event, *partial_group;
 	int ret;
@@ -676,18 +674,18 @@ group_sched_in(struct perf_event *group_event,
 	if (group_event->state == PERF_EVENT_STATE_OFF)
 		return 0;
 
-	ret = hw_perf_group_sched_in(group_event, cpuctx, ctx, cpu);
+	ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
 	if (ret)
 		return ret < 0 ? ret : 0;
 
-	if (event_sched_in(group_event, cpuctx, ctx, cpu))
+	if (event_sched_in(group_event, cpuctx, ctx))
 		return -EAGAIN;
 
 	/*
 	 * Schedule in siblings as one group (if any):
 	 */
 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
-		if (event_sched_in(event, cpuctx, ctx, cpu)) {
+		if (event_sched_in(event, cpuctx, ctx)) {
 			partial_group = event;
 			goto group_error;
 		}
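
The ret handling at the top of this hunk encodes a tri-state contract for the arch hook: negative means hard error, zero means "not handled, fall through to the generic path", and positive means the PMU driver scheduled the entire group, which the caller reports as success. A small sketch of just that convention (group_result() is an illustrative name):

/* Sketch of the tri-state contract; group_result() is illustrative. */
#include <assert.h>

/* hw_perf_group_sched_in() return values:
 *   < 0 : hard error, propagated to the caller
 *   = 0 : not handled; the generic event_sched_in() path runs instead
 *   > 0 : the arch PMU driver scheduled the whole group -> success
 */
static int group_result(int hw_ret)
{
	if (hw_ret)
		return hw_ret < 0 ? hw_ret : 0;
	/* ... the generic scheduling path would run here ... */
	return 0;
}

int main(void)
{
	assert(group_result(1) == 0);		/* driver handled it: success */
	assert(group_result(-11) == -11);	/* error (e.g. -EAGAIN) passes through */
	assert(group_result(0) == 0);		/* fell through to the generic path */
	return 0;
}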
@@ -761,7 +759,6 @@ static void __perf_install_in_context(void *info)
 	struct perf_event *event = info;
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_event *leader = event->group_leader;
-	int cpu = smp_processor_id();
 	int err;
 
 	/*
@@ -808,7 +805,7 @@ static void __perf_install_in_context(void *info)
 	if (!group_can_go_on(event, cpuctx, 1))
 		err = -EEXIST;
 	else
-		err = event_sched_in(event, cpuctx, ctx, cpu);
+		err = event_sched_in(event, cpuctx, ctx);
 
 	if (err) {
 		/*
@@ -950,11 +947,9 @@ static void __perf_event_enable(void *info)
 	} else {
 		perf_disable();
 		if (event == leader)
-			err = group_sched_in(event, cpuctx, ctx,
-					     smp_processor_id());
+			err = group_sched_in(event, cpuctx, ctx);
 		else
-			err = event_sched_in(event, cpuctx, ctx,
-					     smp_processor_id());
+			err = event_sched_in(event, cpuctx, ctx);
 		perf_enable();
 	}
 
@@ -1281,19 +1276,18 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 
 static void
 ctx_pinned_sched_in(struct perf_event_context *ctx,
-		    struct perf_cpu_context *cpuctx,
-		    int cpu)
+		    struct perf_cpu_context *cpuctx)
 {
 	struct perf_event *event;
 
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
-		if (event->cpu != -1 && event->cpu != cpu)
+		if (event->cpu != -1 && event->cpu != smp_processor_id())
 			continue;
 
 		if (group_can_go_on(event, cpuctx, 1))
-			group_sched_in(event, cpuctx, ctx, cpu);
+			group_sched_in(event, cpuctx, ctx);
 
 		/*
 		 * If this pinned group hasn't been scheduled,
@@ -1308,8 +1302,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
 
 static void
 ctx_flexible_sched_in(struct perf_event_context *ctx,
-		      struct perf_cpu_context *cpuctx,
-		      int cpu)
+		      struct perf_cpu_context *cpuctx)
 {
 	struct perf_event *event;
 	int can_add_hw = 1;
@@ -1322,11 +1315,11 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of events:
 		 */
-		if (event->cpu != -1 && event->cpu != cpu)
+		if (event->cpu != -1 && event->cpu != smp_processor_id())
 			continue;
 
 		if (group_can_go_on(event, cpuctx, can_add_hw))
-			if (group_sched_in(event, cpuctx, ctx, cpu))
+			if (group_sched_in(event, cpuctx, ctx))
 				can_add_hw = 0;
 	}
 }
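
Both ctx_pinned_sched_in() and ctx_flexible_sched_in() apply the same per-event CPU filter, now against smp_processor_id() directly: event->cpu == -1 means the event counts on any CPU, otherwise it may only go on while running on exactly that CPU. A tiny standalone rendering of the filter (the helper name is illustrative, not a kernel function):

/* Tiny rendering of the per-event CPU filter; helper name illustrative. */
#include <assert.h>
#include <stdbool.h>

static bool cpu_filter_match(int event_cpu, int this_cpu)
{
	/* -1 means "count on any CPU"; otherwise the event may only be
	 * scheduled while running on exactly event_cpu. */
	return event_cpu == -1 || event_cpu == this_cpu;
}

int main(void)
{
	assert(cpu_filter_match(-1, 3));	/* unbound event: always eligible */
	assert(cpu_filter_match(3, 3));		/* bound to CPU 3, running on 3 */
	assert(!cpu_filter_match(3, 1));	/* bound to CPU 3, running on 1 */
	return 0;
}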
@@ -1336,8 +1329,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
 	     enum event_type_t event_type)
 {
-	int cpu = smp_processor_id();
-
 	raw_spin_lock(&ctx->lock);
 	ctx->is_active = 1;
 	if (likely(!ctx->nr_events))
@@ -1352,11 +1343,11 @@ ctx_sched_in(struct perf_event_context *ctx,
 	 * in order to give them the best chance of going on.
 	 */
 	if (event_type & EVENT_PINNED)
-		ctx_pinned_sched_in(ctx, cpuctx, cpu);
+		ctx_pinned_sched_in(ctx, cpuctx);
 
 	/* Then walk through the lower prio flexible groups */
 	if (event_type & EVENT_FLEXIBLE)
-		ctx_flexible_sched_in(ctx, cpuctx, cpu);
+		ctx_flexible_sched_in(ctx, cpuctx);
 
 	perf_enable();
 out:
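
Untouched by this patch, but worth noting: ctx_sched_in() keeps its two-phase order, offering counters to pinned groups before flexible ones so pinned events are never starved by lower-priority groups. A compact model of that ordering (the enum mirrors the kernel's event_type_t flags, used here purely for illustration):

/* Compact model of the pinned-before-flexible ordering. */
#include <stdio.h>

enum event_type_t {
	EVENT_FLEXIBLE	= 0x1,
	EVENT_PINNED	= 0x2,
};

static void ctx_sched_in_model(int event_type)
{
	/* Pinned groups get first pick of the counters... */
	if (event_type & EVENT_PINNED)
		printf("schedule pinned groups\n");
	/* ...flexible groups only get whatever capacity is left. */
	if (event_type & EVENT_FLEXIBLE)
		printf("schedule flexible groups\n");
}

int main(void)
{
	ctx_sched_in_model(EVENT_PINNED | EVENT_FLEXIBLE);
	return 0;
}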