Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r--  arch/arm/kernel/perf_event.c | 45
1 file changed, 34 insertions(+), 11 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf3d47f..b2abfa18f137 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
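The single masked subtraction works because max_period is used as an all-ones mask for the counter width (the pre-patch code already masked both raw counts with it), so unsigned arithmetic wraps the same way the hardware counter does. A minimal standalone sketch, not part of the patch; the 32-bit mask and the sample values are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_period     = 0xffffffffULL; /* assumed 32-bit counter mask */
	uint64_t prev_raw_count = 0xfffffff0ULL; /* read just before the counter wraps */
	uint64_t new_raw_count  = 0x00000010ULL; /* read just after it wraps */

	/* Same expression as the patch: modular subtraction within the counter width. */
	uint64_t delta = (new_raw_count - prev_raw_count) & max_period;

	printf("delta = %llu\n", (unsigned long long)delta); /* prints 32 */
	return 0;
}

The same expression also gives the right answer when no wrap occurred, which is why the old overflow flag and the if/else it drove can be dropped.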
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
-		hwc->sample_period = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period = armpmu->max_period >> 1;
 		hwc->last_period = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
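A hypothetical worked example of what the halved default means for a 32-bit counter (values assumed for illustration, not taken from the patch):

#include <stdint.h>

uint64_t max_period    = 0xffffffffULL;       /* assumed 32-bit counter mask */
uint64_t sample_period = 0xffffffffULL >> 1;  /* 0x7fffffff */

/*
 * Elsewhere in this file the period code writes the negated remaining
 * period into the counter, so with a half-width period the counter
 * starts roughly half way up its range. new_raw_count can then lap
 * prev_raw_count at most once before the overflow interrupt is taken,
 * keeping the masked subtraction above unambiguous.
 */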
@@ -680,6 +680,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 }
 
 /*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+	.notifier_call = pmu_cpu_notify,
+};
+
+/*
  * CPU PMU identification and registration.
  */
 static int __init
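The action check in the notifier folds the suspend/resume variants in with the normal hotplug path: CPU_STARTING_FROZEN is CPU_STARTING with CPU_TASKS_FROZEN OR-ed in, so masking CPU_TASKS_FROZEN off lets one comparison match both. And because CPU_STARTING callbacks run on the CPU that is coming online, reset(NULL) acts on that CPU's own PMU registers. A small illustrative fragment, assuming kernel headers of this era and not taken from the patch:

/*
 * Illustration only: CPU_STARTING_FROZEN == CPU_STARTING | CPU_TASKS_FROZEN,
 * so both the cold-plug and resume paths reach the PMU reset below.
 */
unsigned long action = CPU_STARTING_FROZEN;

if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
	cpu_pmu->reset(NULL);	/* runs on the hotplugged CPU itself */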
@@ -730,6 +752,7 @@ init_hw_perf_events(void)
 		pr_info("enabled with %s PMU driver, %d counters available\n",
 			cpu_pmu->name, cpu_pmu->num_events);
 		cpu_pmu_init(cpu_pmu);
+		register_cpu_notifier(&pmu_cpu_notifier);
 		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");