about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
author Peter Zijlstra <peterz@infradead.org> 2010-02-11 07:21:58 -0500
committer Ingo Molnar <mingo@elte.hu> 2010-02-26 04:56:53 -0500
commit 6e37738a2fac964583debe91099bc3248554f6e5 (patch)
tree 1f1bd19fcfcd328be139a796e2016877814436ad /arch
parent 38331f62c20456454eed9ebea2525f072c6f1d2e (diff)
perf_events: Simplify code by removing cpu argument to hw_perf_group_sched_in()
Since the cpu argument to hw_perf_group_sched_in() is always smp_processor_id(), simplify the code a little by removing this argument and using the current cpu where needed. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: David Miller <davem@davemloft.net> Cc: Paul Mackerras <paulus@samba.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Frederic Weisbecker <fweisbec@gmail.com> LKML-Reference: <1265890918.5396.3.camel@laptop> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/kernel/perf_event.c10
-rw-r--r--arch/sparc/kernel/perf_event.c10
-rw-r--r--arch/x86/kernel/cpu/perf_event.c18
3 files changed, 19 insertions, 19 deletions
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 1eb85fbf53a5..b6cf8f1f4d35 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -718,10 +718,10 @@ static int collect_events(struct perf_event *group, int max_count,
718 return n; 718 return n;
719} 719}
720 720
721static void event_sched_in(struct perf_event *event, int cpu) 721static void event_sched_in(struct perf_event *event)
722{ 722{
723 event->state = PERF_EVENT_STATE_ACTIVE; 723 event->state = PERF_EVENT_STATE_ACTIVE;
724 event->oncpu = cpu; 724 event->oncpu = smp_processor_id();
725 event->tstamp_running += event->ctx->time - event->tstamp_stopped; 725 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
726 if (is_software_event(event)) 726 if (is_software_event(event))
727 event->pmu->enable(event); 727 event->pmu->enable(event);
@@ -735,7 +735,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
735 */ 735 */
736int hw_perf_group_sched_in(struct perf_event *group_leader, 736int hw_perf_group_sched_in(struct perf_event *group_leader,
737 struct perf_cpu_context *cpuctx, 737 struct perf_cpu_context *cpuctx,
738 struct perf_event_context *ctx, int cpu) 738 struct perf_event_context *ctx)
739{ 739{
740 struct cpu_hw_events *cpuhw; 740 struct cpu_hw_events *cpuhw;
741 long i, n, n0; 741 long i, n, n0;
@@ -766,10 +766,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
766 cpuhw->event[i]->hw.config = cpuhw->events[i]; 766 cpuhw->event[i]->hw.config = cpuhw->events[i];
767 cpuctx->active_oncpu += n; 767 cpuctx->active_oncpu += n;
768 n = 1; 768 n = 1;
769 event_sched_in(group_leader, cpu); 769 event_sched_in(group_leader);
770 list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { 770 list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
771 if (sub->state != PERF_EVENT_STATE_OFF) { 771 if (sub->state != PERF_EVENT_STATE_OFF) {
772 event_sched_in(sub, cpu); 772 event_sched_in(sub);
773 ++n; 773 ++n;
774 } 774 }
775 } 775 }
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e856456ec02f..9f2b2bac8b2b 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -980,10 +980,10 @@ static int collect_events(struct perf_event *group, int max_count,
980 return n; 980 return n;
981} 981}
982 982
983static void event_sched_in(struct perf_event *event, int cpu) 983static void event_sched_in(struct perf_event *event)
984{ 984{
985 event->state = PERF_EVENT_STATE_ACTIVE; 985 event->state = PERF_EVENT_STATE_ACTIVE;
986 event->oncpu = cpu; 986 event->oncpu = smp_processor_id();
987 event->tstamp_running += event->ctx->time - event->tstamp_stopped; 987 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
988 if (is_software_event(event)) 988 if (is_software_event(event))
989 event->pmu->enable(event); 989 event->pmu->enable(event);
@@ -991,7 +991,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
991 991
992int hw_perf_group_sched_in(struct perf_event *group_leader, 992int hw_perf_group_sched_in(struct perf_event *group_leader,
993 struct perf_cpu_context *cpuctx, 993 struct perf_cpu_context *cpuctx,
994 struct perf_event_context *ctx, int cpu) 994 struct perf_event_context *ctx)
995{ 995{
996 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 996 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
997 struct perf_event *sub; 997 struct perf_event *sub;
@@ -1015,10 +1015,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
1015 1015
1016 cpuctx->active_oncpu += n; 1016 cpuctx->active_oncpu += n;
1017 n = 1; 1017 n = 1;
1018 event_sched_in(group_leader, cpu); 1018 event_sched_in(group_leader);
1019 list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { 1019 list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
1020 if (sub->state != PERF_EVENT_STATE_OFF) { 1020 if (sub->state != PERF_EVENT_STATE_OFF) {
1021 event_sched_in(sub, cpu); 1021 event_sched_in(sub);
1022 n++; 1022 n++;
1023 } 1023 }
1024 } 1024 }
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index aa12f36e4711..ad096562d694 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2403,12 +2403,12 @@ done:
2403} 2403}
2404 2404
2405static int x86_event_sched_in(struct perf_event *event, 2405static int x86_event_sched_in(struct perf_event *event,
2406 struct perf_cpu_context *cpuctx, int cpu) 2406 struct perf_cpu_context *cpuctx)
2407{ 2407{
2408 int ret = 0; 2408 int ret = 0;
2409 2409
2410 event->state = PERF_EVENT_STATE_ACTIVE; 2410 event->state = PERF_EVENT_STATE_ACTIVE;
2411 event->oncpu = cpu; 2411 event->oncpu = smp_processor_id();
2412 event->tstamp_running += event->ctx->time - event->tstamp_stopped; 2412 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2413 2413
2414 if (!is_x86_event(event)) 2414 if (!is_x86_event(event))
@@ -2424,7 +2424,7 @@ static int x86_event_sched_in(struct perf_event *event,
2424} 2424}
2425 2425
2426static void x86_event_sched_out(struct perf_event *event, 2426static void x86_event_sched_out(struct perf_event *event,
2427 struct perf_cpu_context *cpuctx, int cpu) 2427 struct perf_cpu_context *cpuctx)
2428{ 2428{
2429 event->state = PERF_EVENT_STATE_INACTIVE; 2429 event->state = PERF_EVENT_STATE_INACTIVE;
2430 event->oncpu = -1; 2430 event->oncpu = -1;
@@ -2452,9 +2452,9 @@ static void x86_event_sched_out(struct perf_event *event,
2452 */ 2452 */
2453int hw_perf_group_sched_in(struct perf_event *leader, 2453int hw_perf_group_sched_in(struct perf_event *leader,
2454 struct perf_cpu_context *cpuctx, 2454 struct perf_cpu_context *cpuctx,
2455 struct perf_event_context *ctx, int cpu) 2455 struct perf_event_context *ctx)
2456{ 2456{
2457 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 2457 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
2458 struct perf_event *sub; 2458 struct perf_event *sub;
2459 int assign[X86_PMC_IDX_MAX]; 2459 int assign[X86_PMC_IDX_MAX];
2460 int n0, n1, ret; 2460 int n0, n1, ret;
@@ -2468,14 +2468,14 @@ int hw_perf_group_sched_in(struct perf_event *leader,
2468 if (ret) 2468 if (ret)
2469 return ret; 2469 return ret;
2470 2470
2471 ret = x86_event_sched_in(leader, cpuctx, cpu); 2471 ret = x86_event_sched_in(leader, cpuctx);
2472 if (ret) 2472 if (ret)
2473 return ret; 2473 return ret;
2474 2474
2475 n1 = 1; 2475 n1 = 1;
2476 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 2476 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2477 if (sub->state > PERF_EVENT_STATE_OFF) { 2477 if (sub->state > PERF_EVENT_STATE_OFF) {
2478 ret = x86_event_sched_in(sub, cpuctx, cpu); 2478 ret = x86_event_sched_in(sub, cpuctx);
2479 if (ret) 2479 if (ret)
2480 goto undo; 2480 goto undo;
2481 ++n1; 2481 ++n1;
@@ -2500,11 +2500,11 @@ int hw_perf_group_sched_in(struct perf_event *leader,
2500 */ 2500 */
2501 return 1; 2501 return 1;
2502undo: 2502undo:
2503 x86_event_sched_out(leader, cpuctx, cpu); 2503 x86_event_sched_out(leader, cpuctx);
2504 n0 = 1; 2504 n0 = 1;
2505 list_for_each_entry(sub, &leader->sibling_list, group_entry) { 2505 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2506 if (sub->state == PERF_EVENT_STATE_ACTIVE) { 2506 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2507 x86_event_sched_out(sub, cpuctx, cpu); 2507 x86_event_sched_out(sub, cpuctx);
2508 if (++n0 == n1) 2508 if (++n0 == n1)
2509 break; 2509 break;
2510 } 2510 }