 arch/powerpc/kernel/perf_counter.c |  9 ++++++---
 arch/x86/kernel/cpu/perf_counter.c | 15 ++++++++++++---
 include/linux/perf_counter.h       |  6 ++++--
 kernel/perf_counter.c              |  9 ++++++---
 4 files changed, 28 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 5e0bf399c433..4990ce2e5f08 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -767,6 +767,7 @@ static void power_pmu_unthrottle(struct perf_counter *counter)
 	perf_disable();
 	power_pmu_read(counter);
 	left = counter->hw.sample_period;
+	counter->hw.last_period = left;
 	val = 0;
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
@@ -937,7 +938,8 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	counter->hw.config = events[n];
 	counter->hw.counter_base = cflags[n];
-	atomic64_set(&counter->hw.period_left, counter->hw.sample_period);
+	counter->hw.last_period = counter->hw.sample_period;
+	atomic64_set(&counter->hw.period_left, counter->hw.last_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -1002,8 +1004,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 */
 	if (record) {
 		struct perf_sample_data data = {
-			.regs = regs,
-			.addr = 0,
+			.regs	= regs,
+			.addr	= 0,
+			.period	= counter->hw.last_period,
 		};
 
 		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 82a23d487f92..57ae1bec81be 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -698,6 +698,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
 	if (!hwc->sample_period) {
 		hwc->sample_period = x86_pmu.max_period;
+		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
 	}
 
@@ -880,12 +881,14 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (unlikely(left <= -period)) {
 		left = period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 		ret = 1;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 		ret = 1;
 	}
 	/*
@@ -1257,9 +1260,12 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
 			continue;
 
-		/* counter overflow */
-		handled = 1;
-		inc_irq_stat(apic_perf_irqs);
+		/*
+		 * counter overflow
+		 */
+		handled = 1;
+		data.period = counter->hw.last_period;
+
 		if (!x86_perf_counter_set_period(counter, hwc, idx))
 			continue;
 
@@ -1267,6 +1273,9 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		amd_pmu_disable_counter(hwc, idx);
 	}
 
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
 	return handled;
 }
 
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index d8c0eb480f9a..5b966472b458 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -366,6 +366,7 @@ struct hw_perf_counter {
 	};
 	atomic64_t			prev_count;
 	u64				sample_period;
+	u64				last_period;
 	atomic64_t			period_left;
 	u64				interrupts;
 
@@ -606,8 +607,9 @@ extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
 extern void perf_counter_update_userpage(struct perf_counter *counter);
 
 struct perf_sample_data {
-	struct pt_regs		*regs;
-	u64			addr;
+	struct pt_regs			*regs;
+	u64				addr;
+	u64				period;
 };
 
 extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4fe85e804f43..8b89b40bd0f0 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2495,7 +2495,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	perf_output_put(&handle, cpu_entry);
 
 	if (sample_type & PERF_SAMPLE_PERIOD)
-		perf_output_put(&handle, counter->hw.sample_period);
+		perf_output_put(&handle, data->period);
 
 	/*
 	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
@@ -3040,11 +3040,13 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 	if (unlikely(left <= -period)) {
 		left = period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
 		atomic64_add(period, &hwc->period_left);
+		hwc->last_period = period;
 	}
 
 	atomic64_set(&hwc->prev_count, -left);
@@ -3086,8 +3088,9 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 				    int nmi, struct pt_regs *regs, u64 addr)
 {
 	struct perf_sample_data data = {
-		.regs = regs,
-		.addr = addr,
+		.regs	= regs,
+		.addr	= addr,
+		.period	= counter->hw.last_period,
 	};
 
 	perf_swcounter_update(counter);
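
All four files implement one pattern: the period actually programmed into a
counter is recorded in hw.last_period at arming time, and on overflow that
recorded value, not the possibly already-updated hw.sample_period, is copied
into struct perf_sample_data and emitted for PERF_SAMPLE_PERIOD. Below is a
minimal user-space sketch of that bookkeeping, assuming nothing beyond the
diff itself; all names are hypothetical stand-ins, not kernel API (the kernel
uses atomic64_t and per-PMU reprogramming instead).

#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of the relevant hw_perf_counter fields. */
struct counter {
	int64_t  period_left;   /* events until the next overflow */
	uint64_t sample_period; /* currently requested period */
	uint64_t last_period;   /* period that was actually programmed */
};

/*
 * Re-arm the counter, mirroring the pattern the patch adds to
 * x86_perf_counter_set_period() and perf_swcounter_set_period():
 * whatever period is programmed now is remembered in last_period,
 * so a later overflow can report the period that produced it.
 */
static void set_period(struct counter *c)
{
	int64_t period = (int64_t)c->sample_period;
	int64_t left = c->period_left;

	if (left <= 0) {
		left += period;
		c->last_period = (uint64_t)period;
	}
	c->period_left = left;
}

/* Count n events; on overflow, emit last_period, not sample_period. */
static void advance(struct counter *c, int64_t n)
{
	c->period_left -= n;
	while (c->period_left <= 0) {
		printf("overflow: period=%llu\n",
		       (unsigned long long)c->last_period);
		set_period(c);
	}
}

int main(void)
{
	struct counter c = { .period_left = 0, .sample_period = 1000 };

	set_period(&c);        /* programs 1000; last_period = 1000 */
	c.sample_period = 500; /* period changes while counter runs */
	advance(&c, 1000);     /* reports period=1000: the window that
	                          actually elapsed, not the new 500 */
	return 0;
}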