diff options
author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2010-03-23 14:31:15 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-04-02 13:30:02 -0400 |
commit | b38b24ead33417146e051453d04bf60b8d2d7e25 (patch) | |
tree | 9f9801c5b10dd5a57b07deaace6fd12a36740d65 /arch/x86/kernel/cpu/perf_event.c | |
parent | 85257024096a96fc5c00ce59d685f62bbed3ad95 (diff) |
perf, x86: Fix AMD hotplug & constraint initialization
Commit 3f6da39 ("perf: Rework and fix the arch CPU-hotplug hooks") moved
the amd northbridge allocation from CPUS_ONLINE to CPUS_PREPARE_UP
however amd_nb_id() doesn't work yet on prepare, so it would simply bail,
basically reverting to a state where we do not properly track node-wide
constraints - causing weird perf results.
Fix up the AMD NorthBridge initialization code by allocating from
CPU_UP_PREPARE and installing it from CPU_STARTING once we have the
proper nb_id. It also properly deals with the allocation failing.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
[ robustify using amd_has_nb() ]
Signed-off-by: Stephane Eranian <eranian@google.com>
LKML-Reference: <1269353485.5109.48.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r-- | arch/x86/kernel/cpu/perf_event.c | 8 |
1 file changed, 5 insertions, 3 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 5fb490c6ee5..bd28cf9d8a8 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -158,7 +158,7 @@ struct x86_pmu { | |||
158 | struct perf_event *event); | 158 | struct perf_event *event); |
159 | struct event_constraint *event_constraints; | 159 | struct event_constraint *event_constraints; |
160 | 160 | ||
161 | void (*cpu_prepare)(int cpu); | 161 | int (*cpu_prepare)(int cpu); |
162 | void (*cpu_starting)(int cpu); | 162 | void (*cpu_starting)(int cpu); |
163 | void (*cpu_dying)(int cpu); | 163 | void (*cpu_dying)(int cpu); |
164 | void (*cpu_dead)(int cpu); | 164 | void (*cpu_dead)(int cpu); |
@@ -1333,11 +1333,12 @@ static int __cpuinit | |||
1333 | x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | 1333 | x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
1334 | { | 1334 | { |
1335 | unsigned int cpu = (long)hcpu; | 1335 | unsigned int cpu = (long)hcpu; |
1336 | int ret = NOTIFY_OK; | ||
1336 | 1337 | ||
1337 | switch (action & ~CPU_TASKS_FROZEN) { | 1338 | switch (action & ~CPU_TASKS_FROZEN) { |
1338 | case CPU_UP_PREPARE: | 1339 | case CPU_UP_PREPARE: |
1339 | if (x86_pmu.cpu_prepare) | 1340 | if (x86_pmu.cpu_prepare) |
1340 | x86_pmu.cpu_prepare(cpu); | 1341 | ret = x86_pmu.cpu_prepare(cpu); |
1341 | break; | 1342 | break; |
1342 | 1343 | ||
1343 | case CPU_STARTING: | 1344 | case CPU_STARTING: |
@@ -1350,6 +1351,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | |||
1350 | x86_pmu.cpu_dying(cpu); | 1351 | x86_pmu.cpu_dying(cpu); |
1351 | break; | 1352 | break; |
1352 | 1353 | ||
1354 | case CPU_UP_CANCELED: | ||
1353 | case CPU_DEAD: | 1355 | case CPU_DEAD: |
1354 | if (x86_pmu.cpu_dead) | 1356 | if (x86_pmu.cpu_dead) |
1355 | x86_pmu.cpu_dead(cpu); | 1357 | x86_pmu.cpu_dead(cpu); |
@@ -1359,7 +1361,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | |||
1359 | break; | 1361 | break; |
1360 | } | 1362 | } |
1361 | 1363 | ||
1362 | return NOTIFY_OK; | 1364 | return ret; |
1363 | } | 1365 | } |
1364 | 1366 | ||
1365 | static void __init pmu_check_apic(void) | 1367 | static void __init pmu_check_apic(void) |