Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--   arch/x86/kernel/cpu/perf_event.c        |  1 +
-rw-r--r--   arch/x86/kernel/cpu/perf_event_intel.c  | 18 ++++++++++++------
2 files changed, 13 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4d6ce5d612da..26604188aa49 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -30,6 +30,7 @@
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 #include <asm/compat.h>
+#include <asm/smp.h>
 
 #if 0
 #undef wrmsrl
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6e9b6763ff48..8fc2b2cee1da 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1205,6 +1205,9 @@ static int intel_pmu_cpu_prepare(int cpu)
 {
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
+	if (!cpu_has_ht_siblings())
+		return NOTIFY_OK;
+
 	cpuc->per_core = kzalloc_node(sizeof(struct intel_percore),
 				      GFP_KERNEL, cpu_to_node(cpu));
 	if (!cpuc->per_core)
@@ -1221,6 +1224,15 @@ static void intel_pmu_cpu_starting(int cpu)
 	int core_id = topology_core_id(cpu);
 	int i;
 
+	init_debug_store_on_cpu(cpu);
+	/*
+	 * Deal with CPUs that don't clear their LBRs on power-up.
+	 */
+	intel_pmu_lbr_reset();
+
+	if (!cpu_has_ht_siblings())
+		return;
+
 	for_each_cpu(i, topology_thread_cpumask(cpu)) {
 		struct intel_percore *pc = per_cpu(cpu_hw_events, i).per_core;
 
@@ -1233,12 +1245,6 @@ static void intel_pmu_cpu_starting(int cpu)
 
 	cpuc->per_core->core_id = core_id;
 	cpuc->per_core->refcnt++;
-
-	init_debug_store_on_cpu(cpu);
-	/*
-	 * Deal with CPUs that don't clear their LBRs on power-up.
-	 */
-	intel_pmu_lbr_reset();
 }
 
 static void intel_pmu_cpu_dying(int cpu)