Diffstat (limited to 'arch/powerpc/kernel/perf_counter.c')
 arch/powerpc/kernel/perf_counter.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index bb1b463c1361..db8d5cafc159 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -534,7 +534,7 @@ void hw_perf_enable(void)
 			continue;
 		}
 		val = 0;
-		if (counter->hw_event.irq_period) {
+		if (counter->hw.irq_period) {
 			left = atomic64_read(&counter->hw.period_left);
 			if (left < 0x80000000L)
 				val = 0x80000000L - left;
@@ -829,8 +829,6 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	if (!ppmu)
 		return ERR_PTR(-ENXIO);
-	if ((s64)counter->hw_event.irq_period < 0)
-		return ERR_PTR(-EINVAL);
 	if (!perf_event_raw(&counter->hw_event)) {
 		ev = perf_event_id(&counter->hw_event);
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
@@ -901,7 +899,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	counter->hw.config = events[n];
 	counter->hw.counter_base = cflags[n];
-	atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
+	atomic64_set(&counter->hw.period_left, counter->hw.irq_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -934,6 +932,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 static void record_and_restart(struct perf_counter *counter, long val,
 			       struct pt_regs *regs, int nmi)
 {
+	u64 period = counter->hw.irq_period;
 	s64 prev, delta, left;
 	int record = 0;
 
@@ -948,11 +947,11 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 */
 	val = 0;
 	left = atomic64_read(&counter->hw.period_left) - delta;
-	if (counter->hw_event.irq_period) {
+	if (period) {
 		if (left <= 0) {
-			left += counter->hw_event.irq_period;
+			left += period;
 			if (left <= 0)
-				left = counter->hw_event.irq_period;
+				left = period;
 			record = 1;
 		}
 		if (left < 0x80000000L)
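
For reference, a minimal standalone sketch of the sample-period bookkeeping that record_and_restart() performs in the hunk above: the period is read once into a local, period_left is decremented by the elapsed count, and when it drops to or below zero it is reloaded and a sample is recorded. This is not kernel code; the struct and function names below are hypothetical stand-ins for the kernel's perf_counter / hw counter types, shown only to illustrate the arithmetic.

/* Simplified model of the kernel's per-counter sampling state. */
#include <stdio.h>
#include <stdint.h>

struct hw_counter {
	uint64_t irq_period;	/* requested sampling period */
	int64_t  period_left;	/* counts remaining until the next sample */
};

/* Returns 1 when a sample should be recorded, and reloads period_left. */
static int overflow_restart(struct hw_counter *hw, int64_t delta)
{
	uint64_t period = hw->irq_period;	/* read the period once */
	int64_t left = hw->period_left - delta;
	int record = 0;

	if (period) {
		if (left <= 0) {
			left += period;		/* carry the overshoot forward */
			if (left <= 0)
				left = period;	/* clamp if we fell far behind */
			record = 1;
		}
	}
	hw->period_left = left;
	return record;
}

int main(void)
{
	struct hw_counter hw = { .irq_period = 1000, .period_left = 1000 };

	/* 1200 events elapsed: one sample is due, 800 counts remain. */
	printf("record=%d left=%lld\n",
	       overflow_restart(&hw, 1200), (long long)hw.period_left);
	return 0;
}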