author	Thomas Gleixner <tglx@linutronix.de>	2017-01-31 17:58:39 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-02-09 02:08:29 -0500
commit	bebb9d75e84d38c7a10fcd4619fb1554a46f4715 (patch)
tree	6427b181860a37b05c4d2998590c671ec8eabadb
parent	e6bd712154aaf121bff1791431854886f5983712 (diff)
perf/x86/intel/uncore: Clean up hotplug conversion fallout
commit 1aa6cfd33df492939b0be15ebdbcff1f8ae5ddb6 upstream.

The recent conversion to the hotplug state machine kept two mechanisms
from the original code:

 1) The first_init logic which adds the number of online CPUs in a
    package to the refcount. That's wrong because the callbacks are
    executed for all online CPUs. Remove it so the refcounting is
    correct.

 2) The on_each_cpu() call to undo box->init() in the error handling
    path. That's bogus because when the prepare callback fails no box
    has been initialized yet. Remove it.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Yasuaki Ishimatsu <yasu.isimatu@gmail.com>
Fixes: 1a246b9f58c6 ("perf/x86/intel/uncore: Convert to hotplug state machine")
Link: http://lkml.kernel.org/r/20170131230141.298032324@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
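To make point 1) concrete: the STARTING callback runs once on every online
CPU, and under the old scheme each invocation added ncpus to the refcount.
For a package with four online CPUs that yields a refcount of 16 instead of
4, so offlining all CPUs could never bring it back to zero and the box exit
path would never run. A minimal userspace sketch of that arithmetic
(illustrative only, not kernel code; NCPUS and the counters are made up):

#include <stdio.h>

#define NCPUS 4	/* assume four online CPUs in the package */

int main(void)
{
	int refcnt_old = 0, refcnt_new = 0;

	/* The STARTING callback executes once per online CPU. */
	for (int cpu = 0; cpu < NCPUS; cpu++) {
		/* Old scheme: every invocation added ncpus. */
		refcnt_old += NCPUS;
		/* New scheme: each invocation adds exactly one reference. */
		refcnt_new += 1;
	}

	/* Prints "old=16 new=4". */
	printf("old=%d new=%d\n", refcnt_old, refcnt_new);
	return 0;
}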
-rw-r--r--	arch/x86/events/intel/uncore.c	44
1 file changed, 4 insertions(+), 40 deletions(-)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index dbaaf7dc8373..19d646a783fd 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -763,30 +763,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
 	pmu->registered = false;
 }
 
-static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
-{
-	struct intel_uncore_pmu *pmu = type->pmus;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	if (pmu) {
-		pkg = topology_physical_package_id(cpu);
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box)
-				uncore_box_exit(box);
-		}
-	}
-}
-
-static void uncore_exit_boxes(void *dummy)
-{
-	struct intel_uncore_type **types;
-
-	for (types = uncore_msr_uncores; *types; types++)
-		__uncore_exit_boxes(*types++, smp_processor_id());
-}
-
 static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
 {
 	int pkg;
@@ -1077,22 +1053,12 @@ static int uncore_cpu_dying(unsigned int cpu)
 	return 0;
 }
 
-static int first_init;
-
 static int uncore_cpu_starting(unsigned int cpu)
 {
 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
-	int i, pkg, ncpus = 1;
-
-	if (first_init) {
-		/*
-		 * On init we get the number of online cpus in the package
-		 * and set refcount for all of them.
-		 */
-		ncpus = cpumask_weight(topology_core_cpumask(cpu));
-	}
+	int i, pkg;
 
 	pkg = topology_logical_package_id(cpu);
 	for (; *types; types++) {
@@ -1103,7 +1069,7 @@ static int uncore_cpu_starting(unsigned int cpu)
 			if (!box)
 				continue;
 			/* The first cpu on a package activates the box */
-			if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
+			if (atomic_inc_return(&box->refcnt) == 1)
 				uncore_box_init(box);
 		}
 	}
@@ -1407,19 +1373,17 @@ static int __init intel_uncore_init(void)
 					  "PERF_X86_UNCORE_PREP",
 					  uncore_cpu_prepare, NULL);
 	}
-	first_init = 1;
+
 	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
 			  "AP_PERF_X86_UNCORE_STARTING",
 			  uncore_cpu_starting, uncore_cpu_dying);
-	first_init = 0;
+
 	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
 			  "AP_PERF_X86_UNCORE_ONLINE",
 			  uncore_event_cpu_online, uncore_event_cpu_offline);
 	return 0;
 
 err:
-	/* Undo box->init_box() */
-	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 	return ret;
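As for point 2), boxes are only initialized by the STARTING callback
(uncore_box_init() above); the goto err paths are reached before that
callback has run anywhere, so the removed on_each_cpu_mask() undo had
nothing to undo. A userspace sketch of that ordering (function names are
illustrative stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

static bool box_initialized;	/* stands in for the box init state */

/* Stand-in for the setup step that can fail and jump to err. */
static int prepare_step(void) { return -1; /* simulate failure */ }

/* Stand-in for uncore_cpu_starting(): the only place boxes get initialized. */
static void starting_step(void) { box_initialized = true; }

int main(void)
{
	if (prepare_step() < 0) {
		/*
		 * err: path — starting_step() never ran, so there is no
		 * initialized box to tear down here.
		 */
		printf("failed early, box_initialized=%d\n", box_initialized);
		return 1;
	}
	starting_step();
	return 0;
}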