aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorStephane Eranian <eranian@google.com>2014-02-11 10:20:07 -0500
committerThomas Gleixner <tglx@linutronix.de>2014-02-21 15:49:07 -0500
commit411cf180fa00521f9bfb1d022e3ebf059a2d299f (patch)
tree6aa5199b6d82b369fe16f6f8f72348d7ea33bcca /arch/x86
parentcd578abb24aa67ce468c427d3356c08ea32cf768 (diff)
perf/x86/uncore: fix initialization of cpumask
On certain processors, the uncore PMU boxes may only be MSR-based or PCI-based. But in both cases, the cpumask, suggesting on which CPUs to monitor to get full coverage of the particular PMU, must be created. However, with the current code base, the cpumask was only created on processors which had at least one MSR-based uncore PMU. This patch removes that restriction and ensures the cpumask is created even when there is no MSR-based PMU. This is the case, for instance, on SNB client, where only a PCI-based memory controller PMU is supported. Cc: mingo@elte.hu Cc: acme@redhat.com Cc: ak@linux.intel.com Cc: zheng.z.yan@intel.com Cc: peterz@infradead.org Signed-off-by: Stephane Eranian <eranian@google.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1392132015-14521-2-git-send-email-eranian@google.com Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c61
1 files changed, 37 insertions, 24 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 29c248799ced..fe4255b9be55 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -3764,7 +3764,7 @@ static void __init uncore_cpu_setup(void *dummy)
3764 3764
3765static int __init uncore_cpu_init(void) 3765static int __init uncore_cpu_init(void)
3766{ 3766{
3767 int ret, cpu, max_cores; 3767 int ret, max_cores;
3768 3768
3769 max_cores = boot_cpu_data.x86_max_cores; 3769 max_cores = boot_cpu_data.x86_max_cores;
3770 switch (boot_cpu_data.x86_model) { 3770 switch (boot_cpu_data.x86_model) {
@@ -3808,29 +3808,6 @@ static int __init uncore_cpu_init(void)
3808 if (ret) 3808 if (ret)
3809 return ret; 3809 return ret;
3810 3810
3811 get_online_cpus();
3812
3813 for_each_online_cpu(cpu) {
3814 int i, phys_id = topology_physical_package_id(cpu);
3815
3816 for_each_cpu(i, &uncore_cpu_mask) {
3817 if (phys_id == topology_physical_package_id(i)) {
3818 phys_id = -1;
3819 break;
3820 }
3821 }
3822 if (phys_id < 0)
3823 continue;
3824
3825 uncore_cpu_prepare(cpu, phys_id);
3826 uncore_event_init_cpu(cpu);
3827 }
3828 on_each_cpu(uncore_cpu_setup, NULL, 1);
3829
3830 register_cpu_notifier(&uncore_cpu_nb);
3831
3832 put_online_cpus();
3833
3834 return 0; 3811 return 0;
3835} 3812}
3836 3813
@@ -3859,6 +3836,41 @@ static int __init uncore_pmus_register(void)
3859 return 0; 3836 return 0;
3860} 3837}
3861 3838
3839static void uncore_cpumask_init(void)
3840{
3841 int cpu;
3842
3843 /*
3844 * ony invoke once from msr or pci init code
3845 */
3846 if (!cpumask_empty(&uncore_cpu_mask))
3847 return;
3848
3849 get_online_cpus();
3850
3851 for_each_online_cpu(cpu) {
3852 int i, phys_id = topology_physical_package_id(cpu);
3853
3854 for_each_cpu(i, &uncore_cpu_mask) {
3855 if (phys_id == topology_physical_package_id(i)) {
3856 phys_id = -1;
3857 break;
3858 }
3859 }
3860 if (phys_id < 0)
3861 continue;
3862
3863 uncore_cpu_prepare(cpu, phys_id);
3864 uncore_event_init_cpu(cpu);
3865 }
3866 on_each_cpu(uncore_cpu_setup, NULL, 1);
3867
3868 register_cpu_notifier(&uncore_cpu_nb);
3869
3870 put_online_cpus();
3871}
3872
3873
3862static int __init intel_uncore_init(void) 3874static int __init intel_uncore_init(void)
3863{ 3875{
3864 int ret; 3876 int ret;
@@ -3877,6 +3889,7 @@ static int __init intel_uncore_init(void)
3877 uncore_pci_exit(); 3889 uncore_pci_exit();
3878 goto fail; 3890 goto fail;
3879 } 3891 }
3892 uncore_cpumask_init();
3880 3893
3881 uncore_pmus_register(); 3894 uncore_pmus_register();
3882 return 0; 3895 return 0;