aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2016-02-22 17:19:24 -0500
committerIngo Molnar <mingo@kernel.org>2016-02-29 03:35:24 -0500
commit7162b8fea63061b6231bc5e8a0fed55167e71b4c (patch)
tree20b0d879581bac4dd39317e4ffe0a6b4a9585d15
parent512089d98457b7913d2e4762a44af52fbcd87470 (diff)
perf/x86/intel/rapl: Refactor the code some more
Split out code from init into separate functions. Tidy up the code and get rid of pointless comments. I wish there would be comments for code which is not obvious.... Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andi Kleen <andi.kleen@intel.com> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Harish Chegondi <harish.chegondi@intel.com> Cc: Jacob Pan <jacob.jun.pan@linux.intel.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Kan Liang <kan.liang@intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Cc: Vince Weaver <vincent.weaver@maine.edu> Cc: linux-kernel@vger.kernel.org Link: http://lkml.kernel.org/r/20160222221012.588544679@linutronix.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/events/intel/rapl.c61
1 files changed, 31 insertions, 30 deletions
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index f31e4b417adf..ba5043b12c3e 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -110,7 +110,7 @@ static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
110static struct kobj_attribute format_attr_##_var = \ 110static struct kobj_attribute format_attr_##_var = \
111 __ATTR(_name, 0444, __rapl_##_var##_show, NULL) 111 __ATTR(_name, 0444, __rapl_##_var##_show, NULL)
112 112
113#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */ 113#define RAPL_CNTR_WIDTH 32
114 114
115#define RAPL_EVENT_ATTR_STR(_name, v, str) \ 115#define RAPL_EVENT_ATTR_STR(_name, v, str) \
116static struct perf_pmu_events_attr event_attr_##v = { \ 116static struct perf_pmu_events_attr event_attr_##v = { \
@@ -120,15 +120,16 @@ static struct perf_pmu_events_attr event_attr_##v = { \
120}; 120};
121 121
122struct rapl_pmu { 122struct rapl_pmu {
123 spinlock_t lock; 123 spinlock_t lock;
124 int n_active; /* number of active events */ 124 int n_active;
125 struct list_head active_list; 125 struct list_head active_list;
126 struct pmu *pmu; /* pointer to rapl_pmu_class */ 126 struct pmu *pmu;
127 ktime_t timer_interval; /* in ktime_t unit */ 127 ktime_t timer_interval;
128 struct hrtimer hrtimer; 128 struct hrtimer hrtimer;
129}; 129};
130 130
131static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly; /* 1/2^hw_unit Joule */ 131 /* 1/2^hw_unit Joule */
132static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
132static struct pmu rapl_pmu_class; 133static struct pmu rapl_pmu_class;
133static cpumask_t rapl_cpu_mask; 134static cpumask_t rapl_cpu_mask;
134static int rapl_cntr_mask; 135static int rapl_cntr_mask;
@@ -200,11 +201,6 @@ static void rapl_start_hrtimer(struct rapl_pmu *pmu)
200 HRTIMER_MODE_REL_PINNED); 201 HRTIMER_MODE_REL_PINNED);
201} 202}
202 203
203static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
204{
205 hrtimer_cancel(&pmu->hrtimer);
206}
207
208static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) 204static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
209{ 205{
210 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); 206 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
@@ -216,9 +212,8 @@ static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
216 212
217 spin_lock_irqsave(&pmu->lock, flags); 213 spin_lock_irqsave(&pmu->lock, flags);
218 214
219 list_for_each_entry(event, &pmu->active_list, active_entry) { 215 list_for_each_entry(event, &pmu->active_list, active_entry)
220 rapl_event_update(event); 216 rapl_event_update(event);
221 }
222 217
223 spin_unlock_irqrestore(&pmu->lock, flags); 218 spin_unlock_irqrestore(&pmu->lock, flags);
224 219
@@ -275,7 +270,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
275 WARN_ON_ONCE(pmu->n_active <= 0); 270 WARN_ON_ONCE(pmu->n_active <= 0);
276 pmu->n_active--; 271 pmu->n_active--;
277 if (pmu->n_active == 0) 272 if (pmu->n_active == 0)
278 rapl_stop_hrtimer(pmu); 273 hrtimer_cancel(&pmu->hrtimer);
279 274
280 list_del(&event->active_entry); 275 list_del(&event->active_entry);
281 276
@@ -542,7 +537,7 @@ static void rapl_cpu_exit(int cpu)
542 perf_pmu_migrate_context(pmu->pmu, cpu, target); 537 perf_pmu_migrate_context(pmu->pmu, cpu, target);
543 538
544 /* cancel overflow polling timer for CPU */ 539 /* cancel overflow polling timer for CPU */
545 rapl_stop_hrtimer(pmu); 540 hrtimer_cancel(&pmu->hrtimer);
546} 541}
547 542
548static void rapl_cpu_init(int cpu) 543static void rapl_cpu_init(int cpu)
@@ -698,6 +693,20 @@ static void __init rapl_advertise(void)
698 } 693 }
699} 694}
700 695
696static int __init rapl_prepare_cpus(void)
697{
698 unsigned int cpu;
699 int ret;
700
701 for_each_online_cpu(cpu) {
702 ret = rapl_cpu_prepare(cpu);
703 if (ret)
704 return ret;
705 rapl_cpu_init(cpu);
706 }
707 return 0;
708}
709
701static void __init cleanup_rapl_pmus(void) 710static void __init cleanup_rapl_pmus(void)
702{ 711{
703 int cpu; 712 int cpu;
@@ -706,7 +715,7 @@ static void __init cleanup_rapl_pmus(void)
706 kfree(per_cpu(rapl_pmu, cpu)); 715 kfree(per_cpu(rapl_pmu, cpu));
707} 716}
708 717
709static const struct x86_cpu_id rapl_cpu_match[] = { 718static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
710 [0] = { .vendor = X86_VENDOR_INTEL, .family = 6 }, 719 [0] = { .vendor = X86_VENDOR_INTEL, .family = 6 },
711 [1] = {}, 720 [1] = {},
712}; 721};
@@ -714,15 +723,11 @@ static const struct x86_cpu_id rapl_cpu_match[] = {
714static int __init rapl_pmu_init(void) 723static int __init rapl_pmu_init(void)
715{ 724{
716 void (*quirk)(void) = NULL; 725 void (*quirk)(void) = NULL;
717 int cpu, ret; 726 int ret;
718 727
719 /*
720 * check for Intel processor family 6
721 */
722 if (!x86_match_cpu(rapl_cpu_match)) 728 if (!x86_match_cpu(rapl_cpu_match))
723 return -ENODEV; 729 return -ENODEV;
724 730
725 /* check supported CPU */
726 switch (boot_cpu_data.x86_model) { 731 switch (boot_cpu_data.x86_model) {
727 case 42: /* Sandy Bridge */ 732 case 42: /* Sandy Bridge */
728 case 58: /* Ivy Bridge */ 733 case 58: /* Ivy Bridge */
@@ -751,7 +756,6 @@ static int __init rapl_pmu_init(void)
751 rapl_pmu_events_group.attrs = rapl_events_knl_attr; 756 rapl_pmu_events_group.attrs = rapl_events_knl_attr;
752 break; 757 break;
753 default: 758 default:
754 /* unsupported */
755 return -ENODEV; 759 return -ENODEV;
756 } 760 }
757 761
@@ -761,12 +765,9 @@ static int __init rapl_pmu_init(void)
761 765
762 cpu_notifier_register_begin(); 766 cpu_notifier_register_begin();
763 767
764 for_each_online_cpu(cpu) { 768 ret = rapl_prepare_cpus();
765 ret = rapl_cpu_prepare(cpu); 769 if (ret)
766 if (ret) 770 goto out;
767 goto out;
768 rapl_cpu_init(cpu);
769 }
770 771
771 ret = perf_pmu_register(&rapl_pmu_class, "power", -1); 772 ret = perf_pmu_register(&rapl_pmu_class, "power", -1);
772 if (ret) 773 if (ret)