summaryrefslogtreecommitdiffstats
path: root/drivers/perf
diff options
context:
space:
mode:
authorLorenzo Pieralisi <lorenzo.pieralisi@arm.com>2016-02-23 13:22:39 -0500
committerWill Deacon <will.deacon@arm.com>2016-02-26 09:37:06 -0500
commitda4e4f18afe0f3729d68f3785c5802f786d36e34 (patch)
tree79bea24abaddb8f5c7ed32b763dbb182a5e072bb /drivers/perf
parent94085fe570e7b87597d4695e6fa77d4256efd29e (diff)
drivers/perf: arm_pmu: implement CPU_PM notifier
When a CPU is suspended (either through suspend-to-RAM or CPUidle), the content of its PMU registers can be lost, which means that counter register values that were initialized on power-down entry have to be reprogrammed on power-up to make sure the counter set-up is preserved (i.e. on power-up, registers take the reset values on Cold or Warm reset, which can be architecturally UNKNOWN). To guarantee seamless profiling conditions across a core power down, this patch adds a CPU PM notifier to ARM PMUs that, upon CPU PM entry/exit from low-power states, saves/restores the PMU register set-up (by using the ARM perf API), so that the power-down/up cycle does not affect the perf behaviour (apart from a black-out period between power-up/down CPU PM notifications that is unavoidable). Cc: Will Deacon <will.deacon@arm.com> Cc: Sudeep Holla <sudeep.holla@arm.com> Cc: Daniel Lezcano <daniel.lezcano@linaro.org> Cc: Mathieu Poirier <mathieu.poirier@linaro.org> Cc: Mark Rutland <mark.rutland@arm.com> Acked-by: Ashwin Chaugule <ashwin.chaugule@linaro.org> Acked-by: Kevin Hilman <khilman@baylibre.com> Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'drivers/perf')
-rw-r--r--drivers/perf/arm_pmu.c95
1 file changed, 95 insertions, 0 deletions
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index ca63a452393a..11bacc7220a1 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/bitmap.h> 14#include <linux/bitmap.h>
15#include <linux/cpumask.h> 15#include <linux/cpumask.h>
16#include <linux/cpu_pm.h>
16#include <linux/export.h> 17#include <linux/export.h>
17#include <linux/kernel.h> 18#include <linux/kernel.h>
18#include <linux/of_device.h> 19#include <linux/of_device.h>
@@ -710,6 +711,93 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
710 return NOTIFY_OK; 711 return NOTIFY_OK;
711} 712}
712 713
714#ifdef CONFIG_CPU_PM
/*
 * Walk every counter in use on this CPU and either stop+save it
 * (CPU_PM_ENTER) or reload+restart it (CPU_PM_EXIT / CPU_PM_ENTER_FAILED),
 * using the armpmu_stop/armpmu_start perf API so event state survives
 * the power transition. Runs on the CPU entering/leaving low power.
 */
715static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
716{
717 struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
718 struct perf_event *event;
719 int idx;
720
721 for (idx = 0; idx < armpmu->num_events; idx++) {
722 /*
723 * If the counter is not used skip it, there is no
724 * need of stopping/restarting it.
725 */
726 if (!test_bit(idx, hw_events->used_mask))
727 continue;
728
729 event = hw_events->events[idx];
730
731 switch (cmd) {
732 case CPU_PM_ENTER:
733 /*
734 * Stop and update the counter
735 */
736 armpmu_stop(event, PERF_EF_UPDATE);
737 break;
738 case CPU_PM_EXIT:
739 case CPU_PM_ENTER_FAILED:
740 /* Restore and enable the counter */
741 armpmu_start(event, PERF_EF_RELOAD);
742 break;
743 default:
744 break;
745 }
746 }
747}
748
/*
 * CPU PM notifier callback: on CPU_PM_ENTER quiesce the PMU and save the
 * active counters; on CPU_PM_EXIT reset the (possibly UNKNOWN) hardware
 * state, then restore counters and restart the PMU. Returns NOTIFY_DONE
 * when this PMU does not cover the current CPU or the command is unknown.
 */
749static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
750 void *v)
751{
752 struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
753 struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
754 int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
755
756 if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
757 return NOTIFY_DONE;
758
759 /*
760 * Always reset the PMU registers on power-up even if
761 * there are no events running.
762 */
763 if (cmd == CPU_PM_EXIT && armpmu->reset)
764 armpmu->reset(armpmu);
765
766 if (!enabled)
767 return NOTIFY_OK;
768
769 switch (cmd) {
770 case CPU_PM_ENTER:
771 armpmu->stop(armpmu);
772 cpu_pm_pmu_setup(armpmu, cmd);
773 break;
774 case CPU_PM_EXIT:
775 cpu_pm_pmu_setup(armpmu, cmd);
		/* Fall through: restart the PMU after restoring counters */
776 case CPU_PM_ENTER_FAILED:
777 armpmu->start(armpmu);
778 break;
779 default:
780 return NOTIFY_DONE;
781 }
782
783 return NOTIFY_OK;
784}
785
/* Hook this PMU into the CPU PM notifier chain; returns 0 on success. */
786static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
787{
788 cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
789 return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
790}
791
/* Detach this PMU from the CPU PM notifier chain (teardown path). */
792static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
793{
794 cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
795}
796#else
/* !CONFIG_CPU_PM: no low-power notifications, so these are no-op stubs. */
797static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
798static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
799#endif
800
713static int cpu_pmu_init(struct arm_pmu *cpu_pmu) 801static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
714{ 802{
715 int err; 803 int err;
@@ -725,6 +813,10 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
725 if (err) 813 if (err)
726 goto out_hw_events; 814 goto out_hw_events;
727 815
816 err = cpu_pm_pmu_register(cpu_pmu);
817 if (err)
818 goto out_unregister;
819
728 for_each_possible_cpu(cpu) { 820 for_each_possible_cpu(cpu) {
729 struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu); 821 struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
730 raw_spin_lock_init(&events->pmu_lock); 822 raw_spin_lock_init(&events->pmu_lock);
@@ -746,6 +838,8 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
746 838
747 return 0; 839 return 0;
748 840
841out_unregister:
842 unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
749out_hw_events: 843out_hw_events:
750 free_percpu(cpu_hw_events); 844 free_percpu(cpu_hw_events);
751 return err; 845 return err;
@@ -753,6 +847,7 @@ out_hw_events:
753 847
754static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) 848static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
755{ 849{
850 cpu_pm_pmu_unregister(cpu_pmu);
756 unregister_cpu_notifier(&cpu_pmu->hotplug_nb); 851 unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
757 free_percpu(cpu_pmu->hw_events); 852 free_percpu(cpu_pmu->hw_events);
758} 853}