author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-03-23 14:31:15 -0400
committer  Ingo Molnar <mingo@elte.hu>                2010-04-02 13:30:02 -0400
commit     b38b24ead33417146e051453d04bf60b8d2d7e25
tree       9f9801c5b10dd5a57b07deaace6fd12a36740d65 /arch
parent     85257024096a96fc5c00ce59d685f62bbed3ad95
perf, x86: Fix AMD hotplug & constraint initialization

Commit 3f6da39 ("perf: Rework and fix the arch CPU-hotplug hooks") moved
the AMD northbridge allocation from CPUS_ONLINE to CPUS_PREPARE_UP;
however, amd_nb_id() doesn't work yet at prepare time, so the allocation
would simply bail, basically reverting to a state where we do not
properly track node-wide constraints - causing weird perf results.

Fix up the AMD NorthBridge initialization code by allocating the
structure from CPU_UP_PREPARE and installing it from CPU_STARTING once
we have the proper nb_id. This also lets us deal properly with the
allocation failing.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
[ robustify using amd_has_nb() ]
Signed-off-by: Stephane Eranian <eranian@google.com>
LKML-Reference: <1269353485.5109.48.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
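
For readers skimming the diff below, here is a condensed sketch of the flow
the patch sets up. It is simplified from the hunks that follow and is not
compilable on its own; locking and error handling are abbreviated:

	/* CPU_UP_PREPARE -- process context, allocation may fail: */
	cpuc = &per_cpu(cpu_hw_events, cpu);
	cpuc->amd_nb = amd_alloc_nb(cpu, -1);	/* nb_id not known yet */
	if (!cpuc->amd_nb)
		return NOTIFY_BAD;		/* vetoes the CPU bring-up */

	/* CPU_STARTING -- runs on the new CPU, amd_get_nb_id() is valid: */
	nb_id = amd_get_nb_id(cpu);
	raw_spin_lock(&amd_nb_lock);
	/* if another online CPU already carries an amd_nb with this nb_id,
	 * adopt it and free our placeholder; otherwise stamp the placeholder
	 * with nb_id -- either way the refcount is bumped */
	raw_spin_unlock(&amd_nb_lock);

The teardown side (CPU_DEAD / CPU_UP_CANCELED) is the mirror image and is
shown in the last hunks of the diff.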
Diffstat (limited to 'arch')

 arch/x86/kernel/cpu/perf_event.c     |  8
 arch/x86/kernel/cpu/perf_event_amd.c | 80
 2 files changed, 52 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 5fb490c6ee5c..bd28cf9d8a82 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -158,7 +158,7 @@ struct x86_pmu {
 					 struct perf_event *event);
 	struct event_constraint *event_constraints;
 
-	void		(*cpu_prepare)(int cpu);
+	int		(*cpu_prepare)(int cpu);
 	void		(*cpu_starting)(int cpu);
 	void		(*cpu_dying)(int cpu);
 	void		(*cpu_dead)(int cpu);
@@ -1333,11 +1333,12 @@ static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
+	int ret = NOTIFY_OK;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 		if (x86_pmu.cpu_prepare)
-			x86_pmu.cpu_prepare(cpu);
+			ret = x86_pmu.cpu_prepare(cpu);
 		break;
 
 	case CPU_STARTING:
@@ -1350,6 +1351,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 			x86_pmu.cpu_dying(cpu);
 		break;
 
+	case CPU_UP_CANCELED:
 	case CPU_DEAD:
 		if (x86_pmu.cpu_dead)
 			x86_pmu.cpu_dead(cpu);
@@ -1359,7 +1361,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 		break;
 	}
 
-	return NOTIFY_OK;
+	return ret;
 }
 
 static void __init pmu_check_apic(void)
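
Taken together, these hunks give cpu_prepare() a way to veto the hotplug:
its return value is now forwarded to the notifier core instead of being
discarded. The comment block below is only a restatement of the resulting
failure path as I read it; the CPU_UP_CANCELED behaviour comes from the
generic hotplug code, not from this patch:

	/*
	 * Failure path, as seen from x86_pmu_notifier():
	 *
	 *   CPU_UP_PREPARE  -> x86_pmu.cpu_prepare() returns NOTIFY_BAD
	 *                      (e.g. the AMD NB allocation below fails);
	 *                      the hotplug core then aborts the bring-up.
	 *   CPU_UP_CANCELED -> now falls through to the CPU_DEAD case, so
	 *                      x86_pmu.cpu_dead() can free whatever
	 *                      cpu_prepare() managed to allocate.
	 */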
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index b87e0b6970cb..db6f7d4056e1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
 	return (hwc->config & 0xe0) == 0xe0;
 }
 
+static inline int amd_has_nb(struct cpu_hw_events *cpuc)
+{
+	struct amd_nb *nb = cpuc->amd_nb;
+
+	return nb && nb->nb_id != -1;
+}
+
 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 				      struct perf_event *event)
 {
@@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 	/*
 	 * only care about NB events
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return;
 
 	/*
@@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	/*
 	 * if not NB event or no NB, then no constraints
 	 */
-	if (!(nb && amd_is_nb_event(hwc)))
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
 		return &unconstrained;
 
 	/*
@@ -293,51 +300,55 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	return nb;
 }
 
-static void amd_pmu_cpu_online(int cpu)
+static int amd_pmu_cpu_prepare(int cpu)
+{
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+	WARN_ON_ONCE(cpuc->amd_nb);
+
+	if (boot_cpu_data.x86_max_cores < 2)
+		return NOTIFY_OK;
+
+	cpuc->amd_nb = amd_alloc_nb(cpu, -1);
+	if (!cpuc->amd_nb)
+		return NOTIFY_BAD;
+
+	return NOTIFY_OK;
+}
+
+static void amd_pmu_cpu_starting(int cpu)
 {
-	struct cpu_hw_events *cpu1, *cpu2;
-	struct amd_nb *nb = NULL;
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	struct amd_nb *nb;
 	int i, nb_id;
 
 	if (boot_cpu_data.x86_max_cores < 2)
 		return;
 
-	/*
-	 * function may be called too early in the
-	 * boot process, in which case nb_id is bogus
-	 */
 	nb_id = amd_get_nb_id(cpu);
-	if (nb_id == BAD_APICID)
-		return;
-
-	cpu1 = &per_cpu(cpu_hw_events, cpu);
-	cpu1->amd_nb = NULL;
+	WARN_ON_ONCE(nb_id == BAD_APICID);
 
 	raw_spin_lock(&amd_nb_lock);
 
 	for_each_online_cpu(i) {
-		cpu2 = &per_cpu(cpu_hw_events, i);
-		nb = cpu2->amd_nb;
-		if (!nb)
+		nb = per_cpu(cpu_hw_events, i).amd_nb;
+		if (WARN_ON_ONCE(!nb))
 			continue;
-		if (nb->nb_id == nb_id)
-			goto found;
-	}
 
-	nb = amd_alloc_nb(cpu, nb_id);
-	if (!nb) {
-		pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
-		raw_spin_unlock(&amd_nb_lock);
-		return;
-	}
-found:
-	nb->refcnt++;
-	cpu1->amd_nb = nb;
+		if (nb->nb_id == nb_id) {
+			kfree(cpuc->amd_nb);
+			cpuc->amd_nb = nb;
+			break;
+		}
+	}
+
+	cpuc->amd_nb->nb_id = nb_id;
+	cpuc->amd_nb->refcnt++;
 
 	raw_spin_unlock(&amd_nb_lock);
 }
 
-static void amd_pmu_cpu_offline(int cpu)
+static void amd_pmu_cpu_dead(int cpu)
 {
 	struct cpu_hw_events *cpuhw;
 
@@ -349,8 +360,10 @@ static void amd_pmu_cpu_offline(int cpu)
 	raw_spin_lock(&amd_nb_lock);
 
 	if (cpuhw->amd_nb) {
-		if (--cpuhw->amd_nb->refcnt == 0)
-			kfree(cpuhw->amd_nb);
+		struct amd_nb *nb = cpuhw->amd_nb;
+
+		if (nb->nb_id == -1 || --nb->refcnt == 0)
+			kfree(nb);
 
 		cpuhw->amd_nb = NULL;
 	}
@@ -379,8 +392,9 @@ static __initconst struct x86_pmu amd_pmu = {
 	.get_event_constraints	= amd_get_event_constraints,
 	.put_event_constraints	= amd_put_event_constraints,
 
-	.cpu_prepare		= amd_pmu_cpu_online,
-	.cpu_dead		= amd_pmu_cpu_offline,
+	.cpu_prepare		= amd_pmu_cpu_prepare,
+	.cpu_starting		= amd_pmu_cpu_starting,
+	.cpu_dead		= amd_pmu_cpu_dead,
 };
 
 static __init int amd_pmu_init(void)
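
One detail worth calling out after the full diff: amd_pmu_cpu_prepare()
allocates the per-CPU NB with nb_id = -1, and that sentinel is what keeps
the window between CPU_UP_PREPARE and CPU_STARTING safe. The fragment below
only juxtaposes the two places in the hunks above that rely on it:

	/* constraint code: a not-yet-installed NB is treated as "no NB" */
	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
		return &unconstrained;

	/* teardown: a never-installed NB (nb_id == -1) was never shared,
	 * so it is freed without looking at the refcount */
	if (nb->nb_id == -1 || --nb->refcnt == 0)
		kfree(nb);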