diff options
author | Zhouyi Zhou <zhouzhouyi@gmail.com> | 2014-06-11 00:09:03 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2014-07-16 07:31:06 -0400 |
commit | 503d3291a937b726757c1f7c45fa02389d2f4324 (patch) | |
tree | 1bd0ff14d7fb370a250c391e7eb7d99cc374fbb2 | |
parent | 8b5b584daf3b92fc5cdc83919e64231817d2f5a7 (diff) |
perf/x86/amd: Try to fix some mem allocation failure handling
According to Peter's advice, put the failure handling to a goto chain.
Compiled in x86_64, could you check if there is anything that I missed.
Signed-off-by: Zhouyi Zhou <yizhouzhou@ict.ac.cn>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1402459743-20513-1-git-send-email-zhouzhouyi@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_amd_uncore.c | 111 |
1 file changed, 84 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index 3bbdf4cd38b9..30790d798e6b 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -294,31 +294,41 @@ static struct amd_uncore *amd_uncore_alloc(unsigned int cpu) | |||
294 | cpu_to_node(cpu)); | 294 | cpu_to_node(cpu)); |
295 | } | 295 | } |
296 | 296 | ||
297 | static void amd_uncore_cpu_up_prepare(unsigned int cpu) | 297 | static int amd_uncore_cpu_up_prepare(unsigned int cpu) |
298 | { | 298 | { |
299 | struct amd_uncore *uncore; | 299 | struct amd_uncore *uncore_nb = NULL, *uncore_l2; |
300 | 300 | ||
301 | if (amd_uncore_nb) { | 301 | if (amd_uncore_nb) { |
302 | uncore = amd_uncore_alloc(cpu); | 302 | uncore_nb = amd_uncore_alloc(cpu); |
303 | uncore->cpu = cpu; | 303 | if (!uncore_nb) |
304 | uncore->num_counters = NUM_COUNTERS_NB; | 304 | goto fail; |
305 | uncore->rdpmc_base = RDPMC_BASE_NB; | 305 | uncore_nb->cpu = cpu; |
306 | uncore->msr_base = MSR_F15H_NB_PERF_CTL; | 306 | uncore_nb->num_counters = NUM_COUNTERS_NB; |
307 | uncore->active_mask = &amd_nb_active_mask; | 307 | uncore_nb->rdpmc_base = RDPMC_BASE_NB; |
308 | uncore->pmu = &amd_nb_pmu; | 308 | uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL; |
309 | *per_cpu_ptr(amd_uncore_nb, cpu) = uncore; | 309 | uncore_nb->active_mask = &amd_nb_active_mask; |
310 | uncore_nb->pmu = &amd_nb_pmu; | ||
311 | *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb; | ||
310 | } | 312 | } |
311 | 313 | ||
312 | if (amd_uncore_l2) { | 314 | if (amd_uncore_l2) { |
313 | uncore = amd_uncore_alloc(cpu); | 315 | uncore_l2 = amd_uncore_alloc(cpu); |
314 | uncore->cpu = cpu; | 316 | if (!uncore_l2) |
315 | uncore->num_counters = NUM_COUNTERS_L2; | 317 | goto fail; |
316 | uncore->rdpmc_base = RDPMC_BASE_L2; | 318 | uncore_l2->cpu = cpu; |
317 | uncore->msr_base = MSR_F16H_L2I_PERF_CTL; | 319 | uncore_l2->num_counters = NUM_COUNTERS_L2; |
318 | uncore->active_mask = &amd_l2_active_mask; | 320 | uncore_l2->rdpmc_base = RDPMC_BASE_L2; |
319 | uncore->pmu = &amd_l2_pmu; | 321 | uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL; |
320 | *per_cpu_ptr(amd_uncore_l2, cpu) = uncore; | 322 | uncore_l2->active_mask = &amd_l2_active_mask; |
323 | uncore_l2->pmu = &amd_l2_pmu; | ||
324 | *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2; | ||
321 | } | 325 | } |
326 | |||
327 | return 0; | ||
328 | |||
329 | fail: | ||
330 | kfree(uncore_nb); | ||
331 | return -ENOMEM; | ||
322 | } | 332 | } |
323 | 333 | ||
324 | static struct amd_uncore * | 334 | static struct amd_uncore * |
@@ -441,7 +451,7 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) | |||
441 | 451 | ||
442 | if (!--uncore->refcnt) | 452 | if (!--uncore->refcnt) |
443 | kfree(uncore); | 453 | kfree(uncore); |
444 | *per_cpu_ptr(amd_uncore_nb, cpu) = NULL; | 454 | *per_cpu_ptr(uncores, cpu) = NULL; |
445 | } | 455 | } |
446 | 456 | ||
447 | static void amd_uncore_cpu_dead(unsigned int cpu) | 457 | static void amd_uncore_cpu_dead(unsigned int cpu) |
@@ -461,7 +471,8 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, | |||
461 | 471 | ||
462 | switch (action & ~CPU_TASKS_FROZEN) { | 472 | switch (action & ~CPU_TASKS_FROZEN) { |
463 | case CPU_UP_PREPARE: | 473 | case CPU_UP_PREPARE: |
464 | amd_uncore_cpu_up_prepare(cpu); | 474 | if (amd_uncore_cpu_up_prepare(cpu)) |
475 | return notifier_from_errno(-ENOMEM); | ||
465 | break; | 476 | break; |
466 | 477 | ||
467 | case CPU_STARTING: | 478 | case CPU_STARTING: |
@@ -501,20 +512,33 @@ static void __init init_cpu_already_online(void *dummy) | |||
501 | amd_uncore_cpu_online(cpu); | 512 | amd_uncore_cpu_online(cpu); |
502 | } | 513 | } |
503 | 514 | ||
515 | static void cleanup_cpu_online(void *dummy) | ||
516 | { | ||
517 | unsigned int cpu = smp_processor_id(); | ||
518 | |||
519 | amd_uncore_cpu_dead(cpu); | ||
520 | } | ||
521 | |||
504 | static int __init amd_uncore_init(void) | 522 | static int __init amd_uncore_init(void) |
505 | { | 523 | { |
506 | unsigned int cpu; | 524 | unsigned int cpu, cpu2; |
507 | int ret = -ENODEV; | 525 | int ret = -ENODEV; |
508 | 526 | ||
509 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) | 527 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) |
510 | return -ENODEV; | 528 | goto fail_nodev; |
511 | 529 | ||
512 | if (!cpu_has_topoext) | 530 | if (!cpu_has_topoext) |
513 | return -ENODEV; | 531 | goto fail_nodev; |
514 | 532 | ||
515 | if (cpu_has_perfctr_nb) { | 533 | if (cpu_has_perfctr_nb) { |
516 | amd_uncore_nb = alloc_percpu(struct amd_uncore *); | 534 | amd_uncore_nb = alloc_percpu(struct amd_uncore *); |
517 | perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1); | 535 | if (!amd_uncore_nb) { |
536 | ret = -ENOMEM; | ||
537 | goto fail_nb; | ||
538 | } | ||
539 | ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1); | ||
540 | if (ret) | ||
541 | goto fail_nb; | ||
518 | 542 | ||
519 | printk(KERN_INFO "perf: AMD NB counters detected\n"); | 543 | printk(KERN_INFO "perf: AMD NB counters detected\n"); |
520 | ret = 0; | 544 | ret = 0; |
@@ -522,20 +546,28 @@ static int __init amd_uncore_init(void) | |||
522 | 546 | ||
523 | if (cpu_has_perfctr_l2) { | 547 | if (cpu_has_perfctr_l2) { |
524 | amd_uncore_l2 = alloc_percpu(struct amd_uncore *); | 548 | amd_uncore_l2 = alloc_percpu(struct amd_uncore *); |
525 | perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1); | 549 | if (!amd_uncore_l2) { |
550 | ret = -ENOMEM; | ||
551 | goto fail_l2; | ||
552 | } | ||
553 | ret = perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1); | ||
554 | if (ret) | ||
555 | goto fail_l2; | ||
526 | 556 | ||
527 | printk(KERN_INFO "perf: AMD L2I counters detected\n"); | 557 | printk(KERN_INFO "perf: AMD L2I counters detected\n"); |
528 | ret = 0; | 558 | ret = 0; |
529 | } | 559 | } |
530 | 560 | ||
531 | if (ret) | 561 | if (ret) |
532 | return -ENODEV; | 562 | goto fail_nodev; |
533 | 563 | ||
534 | cpu_notifier_register_begin(); | 564 | cpu_notifier_register_begin(); |
535 | 565 | ||
536 | /* init cpus already online before registering for hotplug notifier */ | 566 | /* init cpus already online before registering for hotplug notifier */ |
537 | for_each_online_cpu(cpu) { | 567 | for_each_online_cpu(cpu) { |
538 | amd_uncore_cpu_up_prepare(cpu); | 568 | ret = amd_uncore_cpu_up_prepare(cpu); |
569 | if (ret) | ||
570 | goto fail_online; | ||
539 | smp_call_function_single(cpu, init_cpu_already_online, NULL, 1); | 571 | smp_call_function_single(cpu, init_cpu_already_online, NULL, 1); |
540 | } | 572 | } |
541 | 573 | ||
@@ -543,5 +575,30 @@ static int __init amd_uncore_init(void) | |||
543 | cpu_notifier_register_done(); | 575 | cpu_notifier_register_done(); |
544 | 576 | ||
545 | return 0; | 577 | return 0; |
578 | |||
579 | |||
580 | fail_online: | ||
581 | for_each_online_cpu(cpu2) { | ||
582 | if (cpu2 == cpu) | ||
583 | break; | ||
584 | smp_call_function_single(cpu, cleanup_cpu_online, NULL, 1); | ||
585 | } | ||
586 | cpu_notifier_register_done(); | ||
587 | |||
588 | /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */ | ||
589 | amd_uncore_nb = amd_uncore_l2 = NULL; | ||
590 | if (cpu_has_perfctr_l2) | ||
591 | perf_pmu_unregister(&amd_l2_pmu); | ||
592 | fail_l2: | ||
593 | if (cpu_has_perfctr_nb) | ||
594 | perf_pmu_unregister(&amd_nb_pmu); | ||
595 | if (amd_uncore_l2) | ||
596 | free_percpu(amd_uncore_l2); | ||
597 | fail_nb: | ||
598 | if (amd_uncore_nb) | ||
599 | free_percpu(amd_uncore_nb); | ||
600 | |||
601 | fail_nodev: | ||
602 | return ret; | ||
546 | } | 603 | } |
547 | device_initcall(amd_uncore_init); | 604 | device_initcall(amd_uncore_init); |