author		Paul Mackerras <paulus@samba.org>	2009-09-21 19:48:08 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-22 03:30:40 -0400
commit		a8f90e906783f1f815120eefe813b23cb396e9bd (patch)
tree		2c1ff063ebed84906cd8950c5c0560e319dfda3f /arch
parent		43c1266ce4dc06bfd236cec31e11e9ecd69c0bef (diff)
perf_event, powerpc: Fix compilation after big perf_counter rename
This fixes two places in the powerpc perf_event (perf_counter) code
where 'list_entry' needs to be changed to 'group_entry'; both were
missed in commit 65abc865 ("perf_counter: Rename list_entry ->
group_entry, counter_list -> group_list").
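For reference, the member name is the third argument to the
list_for_each_entry() iterator, so a stale name is a hard compile
error rather than a runtime bug. A minimal sketch of the pattern being
fixed (the actual hunks are in the diff below):

    /* Before: iterated via the old perf_event.list_entry member */
    list_for_each_entry(event, &group->sibling_list, list_entry) { ... }

    /* After: the list member is now called group_entry */
    list_for_each_entry(event, &group->sibling_list, group_entry) { ... }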
This also changes 'event' back to 'counter' in a couple of
contexts:
* Field and function names that deal with the limited-function
  counters: it's really the hardware counters whose function is
  limited, not the events that they count. Hence:
      MAX_LIMITED_HWEVENTS -> MAX_LIMITED_HWCOUNTERS
      limited_event -> limited_counter
      freeze/thaw_limited_events -> freeze/thaw_limited_counters
* The machine-specific PMU description struct (struct power_pmu): this
renames 'n_event' back to 'n_counter' since it really describes how
many hardware counters the machine has. (Renaming this back avoids
a compile error in each of the machine-specific PMU back-ends where
they initialize their power_pmu struct.)
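To illustrate the compile error this avoids: the back-ends fill in
struct power_pmu with designated initializers, so a field name that no
longer exists fails the build immediately. A minimal, hypothetical
sketch (the values are illustrative, not taken from any particular
back-end):

    /* Hypothetical machine descriptor; values are made up. */
    static struct power_pmu example_pmu = {
            .name      = "EXAMPLE",
            .n_counter = 6,    /* hardware counters on this machine */
            /* remaining fields (add_fields, test_adder, ...) omitted */
    };

Had the field stayed as n_event, every such '.n_counter = ...'
initializer in the back-ends would fail to compile; renaming it back
lets them build unchanged.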
Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: linuxppc-dev@ozlabs.org
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <19128.4280.813369.589704@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
 arch/powerpc/include/asm/perf_event.h |  4 ++--
 arch/powerpc/kernel/perf_event.c      | 38 +++++++++++++++++++-------------------
 2 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 2499aaadaeb9..3288ce3997e0 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -14,7 +14,7 @@
 
 #define MAX_HWEVENTS 8
 #define MAX_EVENT_ALTERNATIVES 8
-#define MAX_LIMITED_HWEVENTS 2
+#define MAX_LIMITED_HWCOUNTERS 2
 
 /*
  * This struct provides the constants and functions needed to
@@ -22,7 +22,7 @@
  */
 struct power_pmu {
 	const char *name;
-	int n_event;
+	int n_counter;
 	int max_alternatives;
 	unsigned long add_fields;
 	unsigned long test_adder;
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 197b7d958796..bbcbae183e92 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -30,8 +30,8 @@ struct cpu_hw_events {
 	u64 events[MAX_HWEVENTS];
 	unsigned int flags[MAX_HWEVENTS];
 	unsigned long mmcr[3];
-	struct perf_event *limited_event[MAX_LIMITED_HWEVENTS];
-	u8 limited_hwidx[MAX_LIMITED_HWEVENTS];
+	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
+	u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
 	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
 	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
 	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
@@ -253,7 +253,7 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw,
 	unsigned long addf = ppmu->add_fields;
 	unsigned long tadd = ppmu->test_adder;
 
-	if (n_ev > ppmu->n_event)
+	if (n_ev > ppmu->n_counter)
 		return -1;
 
 	/* First see if the events will go on as-is */
@@ -426,7 +426,7 @@ static int is_limited_pmc(int pmcnum)
 		&& (pmcnum == 5 || pmcnum == 6);
 }
 
-static void freeze_limited_events(struct cpu_hw_events *cpuhw,
+static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
 				  unsigned long pmc5, unsigned long pmc6)
 {
 	struct perf_event *event;
@@ -434,7 +434,7 @@ static void freeze_limited_events(struct cpu_hw_events *cpuhw,
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
-		event = cpuhw->limited_event[i];
+		event = cpuhw->limited_counter[i];
 		if (!event->hw.idx)
 			continue;
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
@@ -445,7 +445,7 @@ static void freeze_limited_events(struct cpu_hw_events *cpuhw,
 	}
 }
 
-static void thaw_limited_events(struct cpu_hw_events *cpuhw,
+static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
 				unsigned long pmc5, unsigned long pmc6)
 {
 	struct perf_event *event;
@@ -453,7 +453,7 @@ static void thaw_limited_events(struct cpu_hw_events *cpuhw,
 	int i;
 
 	for (i = 0; i < cpuhw->n_limited; ++i) {
-		event = cpuhw->limited_event[i];
+		event = cpuhw->limited_counter[i];
 		event->hw.idx = cpuhw->limited_hwidx[i];
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
 		atomic64_set(&event->hw.prev_count, val);
@@ -495,9 +495,9 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
 		     "i" (SPRN_PMC5), "i" (SPRN_PMC6));
 
 	if (mmcr0 & MMCR0_FC)
-		freeze_limited_events(cpuhw, pmc5, pmc6);
+		freeze_limited_counters(cpuhw, pmc5, pmc6);
 	else
-		thaw_limited_events(cpuhw, pmc5, pmc6);
+		thaw_limited_counters(cpuhw, pmc5, pmc6);
 
 	/*
 	 * Write the full MMCR0 including the event overflow interrupt
@@ -653,7 +653,7 @@ void hw_perf_enable(void)
 			continue;
 		idx = hwc_index[i] + 1;
 		if (is_limited_pmc(idx)) {
-			cpuhw->limited_event[n_lim] = event;
+			cpuhw->limited_counter[n_lim] = event;
 			cpuhw->limited_hwidx[n_lim] = idx;
 			++n_lim;
 			continue;
@@ -702,7 +702,7 @@ static int collect_events(struct perf_event *group, int max_count,
 		flags[n] = group->hw.event_base;
 		events[n++] = group->hw.config;
 	}
-	list_for_each_entry(event, &group->sibling_list, list_entry) {
+	list_for_each_entry(event, &group->sibling_list, group_entry) {
 		if (!is_software_event(event) &&
 		    event->state != PERF_EVENT_STATE_OFF) {
 			if (n >= max_count)
@@ -742,7 +742,7 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
 		return 0;
 	cpuhw = &__get_cpu_var(cpu_hw_events);
 	n0 = cpuhw->n_events;
-	n = collect_events(group_leader, ppmu->n_event - n0,
+	n = collect_events(group_leader, ppmu->n_counter - n0,
 			   &cpuhw->event[n0], &cpuhw->events[n0],
 			   &cpuhw->flags[n0]);
 	if (n < 0)
@@ -764,7 +764,7 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
 	cpuctx->active_oncpu += n;
 	n = 1;
 	event_sched_in(group_leader, cpu);
-	list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
+	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
 		if (sub->state != PERF_EVENT_STATE_OFF) {
 			event_sched_in(sub, cpu);
 			++n;
@@ -797,7 +797,7 @@ static int power_pmu_enable(struct perf_event *event)
 	 */
 	cpuhw = &__get_cpu_var(cpu_hw_events);
 	n0 = cpuhw->n_events;
-	if (n0 >= ppmu->n_event)
+	if (n0 >= ppmu->n_counter)
 		goto out;
 	cpuhw->event[n0] = event;
 	cpuhw->events[n0] = event->hw.config;
@@ -848,11 +848,11 @@ static void power_pmu_disable(struct perf_event *event)
 		}
 	}
 	for (i = 0; i < cpuhw->n_limited; ++i)
-		if (event == cpuhw->limited_event[i])
+		if (event == cpuhw->limited_counter[i])
 			break;
 	if (i < cpuhw->n_limited) {
 		while (++i < cpuhw->n_limited) {
-			cpuhw->limited_event[i-1] = cpuhw->limited_event[i];
+			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
 			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
 		}
 		--cpuhw->n_limited;
@@ -1078,7 +1078,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 	 */
 	n = 0;
 	if (event->group_leader != event) {
-		n = collect_events(event->group_leader, ppmu->n_event - 1,
+		n = collect_events(event->group_leader, ppmu->n_counter - 1,
 				   ctrs, events, cflags);
 		if (n < 0)
 			return ERR_PTR(-EINVAL);
@@ -1230,7 +1230,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
 	int nmi;
 
 	if (cpuhw->n_limited)
-		freeze_limited_events(cpuhw, mfspr(SPRN_PMC5),
+		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
 				      mfspr(SPRN_PMC6));
 
 	perf_read_regs(regs);
@@ -1260,7 +1260,7 @@
 	 * Any that we processed in the previous loop will not be negative.
 	 */
 	if (!found) {
-		for (i = 0; i < ppmu->n_event; ++i) {
+		for (i = 0; i < ppmu->n_counter; ++i) {
 			if (is_limited_pmc(i + 1))
 				continue;
 			val = read_pmc(i + 1);