about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
Diffstat (limited to 'arch')
-rw-r--r-- arch/powerpc/kernel/perf_counter.c | 12 ++++++------
-rw-r--r-- arch/x86/kernel/cpu/perf_counter.c |  8 ++++----
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index f96d55f55bd6..c9633321e7a5 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -535,7 +535,7 @@ void hw_perf_enable(void)
 			continue;
 		}
 		val = 0;
-		if (counter->hw.irq_period) {
+		if (counter->hw.sample_period) {
 			left = atomic64_read(&counter->hw.period_left);
 			if (left < 0x80000000L)
 				val = 0x80000000L - left;
@@ -749,12 +749,12 @@ static void power_pmu_unthrottle(struct perf_counter *counter)
 	s64 val, left;
 	unsigned long flags;
 
-	if (!counter->hw.idx || !counter->hw.irq_period)
+	if (!counter->hw.idx || !counter->hw.sample_period)
 		return;
 	local_irq_save(flags);
 	perf_disable();
 	power_pmu_read(counter);
-	left = counter->hw.irq_period;
+	left = counter->hw.sample_period;
 	val = 0;
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
@@ -789,7 +789,7 @@ static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
 	if (counter->hw_event.exclude_user
 	    || counter->hw_event.exclude_kernel
 	    || counter->hw_event.exclude_hv
-	    || counter->hw_event.irq_period)
+	    || counter->hw_event.sample_period)
 		return 0;
 
 	if (ppmu->limited_pmc_event(ev))
@@ -925,7 +925,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	counter->hw.config = events[n];
 	counter->hw.counter_base = cflags[n];
-	atomic64_set(&counter->hw.period_left, counter->hw.irq_period);
+	atomic64_set(&counter->hw.period_left, counter->hw.sample_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -958,7 +958,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 static void record_and_restart(struct perf_counter *counter, long val,
 			       struct pt_regs *regs, int nmi)
 {
-	u64 period = counter->hw.irq_period;
+	u64 period = counter->hw.sample_period;
 	s64 prev, delta, left;
 	int record = 0;
 	u64 addr, mmcra, sdsync;
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 316b0c995f38..ec06aa5e9282 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -290,11 +290,11 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	hwc->nmi = 1;
 	hw_event->nmi = 1;
 
-	if (!hwc->irq_period)
-		hwc->irq_period = x86_pmu.max_period;
+	if (!hwc->sample_period)
+		hwc->sample_period = x86_pmu.max_period;
 
 	atomic64_set(&hwc->period_left,
-		     min(x86_pmu.max_period, hwc->irq_period));
+		     min(x86_pmu.max_period, hwc->sample_period));
 
 	/*
 	 * Raw event type provide the config in the event structure
@@ -462,7 +462,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 				 struct hw_perf_counter *hwc, int idx)
 {
 	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = min(x86_pmu.max_period, hwc->irq_period);
+	s64 period = min(x86_pmu.max_period, hwc->sample_period);
 	int err;
 
 	/*