author		Akinobu Mita <akinobu.mita@gmail.com>	2007-10-18 06:05:16 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-18 17:37:21 -0400
commit		ef1d7151d2901295278d5aada39c547ed8601419 (patch)
tree		205782480d956655bd70a9f348fdfed6bda16795
parent		d435d862baca3e25e5eec236762a43251b1e7ffc (diff)
cpu hotplug: intel_cacheinfo: fix cpu hotplug error handling
- Fix a resource leak in the error path of detect_cache_attributes()
- Don't register the hotcpu notifier when cache_add_dev() returns an error
- Introduce the cache_dev_map cpumask to track whether the cache interface
  for a CPU was successfully added by cache_add_dev()
cache_add_dev() may fail with an out-of-memory error. The cache_dev_map
cpumask is needed so that cache_remove_dev() does not tear down an
uninitialized cache interface when the CPU_DEAD event is delivered.

(We cannot move cache_add_dev() from the CPU_ONLINE event handler to the
CPU_UP_PREPARE event handler, because cache_add_dev() needs to execute
cpuid and store the results while the target CPU is online.)
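The same defensive pattern is useful in isolation: record each successful
setup in a bitmap, set the bit only after setup fully succeeds, and make
teardown a no-op for anything not recorded. A minimal userspace C sketch of
the idea follows; NR_CPUS, info, add_dev() and remove_dev() are illustrative
stand-ins for this sketch, not the kernel's interfaces.

#include <stdlib.h>

#define NR_CPUS 8

/* Per-CPU state created by the "add" path; NULL until fully set up. */
static int *info[NR_CPUS];

/* Analog of cache_dev_map: one bit per CPU whose add completed. */
static unsigned long dev_map;

static int add_dev(unsigned int cpu)
{
	info[cpu] = malloc(sizeof(*info[cpu]));
	if (!info[cpu])
		return -1;	/* bit never set, so teardown stays a no-op */
	*info[cpu] = 0;
	dev_map |= 1UL << cpu;	/* record success only as the last step */
	return 0;
}

static void remove_dev(unsigned int cpu)
{
	/* Refuse to touch state that add_dev() never finished creating. */
	if (!(dev_map & (1UL << cpu)))
		return;
	dev_map &= ~(1UL << cpu);
	free(info[cpu]);
	info[cpu] = NULL;
}

int main(void)
{
	add_dev(1);
	remove_dev(0);	/* safe no-op: CPU 0 was never added */
	remove_dev(1);	/* frees CPU 1's state exactly once */
	return 0;
}

Setting the bit as the very last step means a failure anywhere earlier leaves
the map untouched, which is exactly why cache_remove_dev() can return early
on !cpu_isset() without consulting any other state.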
[nix.or.die@googlemail.com: fix a section mismatch warning]
Cc: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Andi Kleen <ak@suse.de>
Cc: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Gabriel Craciunescu <nix.or.die@googlemail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c	66
1 file changed, 45 insertions(+), 21 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 1826395ebeeb..297a24116949 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -499,6 +499,11 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {
 
 static void free_cache_attributes(unsigned int cpu)
 {
+	int i;
+
+	for (i = 0; i < num_cache_leaves; i++)
+		cache_remove_shared_cpu_map(cpu, i);
+
 	kfree(cpuid4_info[cpu]);
 	cpuid4_info[cpu] = NULL;
 }
@@ -506,8 +511,8 @@ static void free_cache_attributes(unsigned int cpu)
 static int __cpuinit detect_cache_attributes(unsigned int cpu)
 {
 	struct _cpuid4_info	*this_leaf;
-	unsigned long		j;
-	int			retval;
+	unsigned long		j;
+	int			retval;
 	cpumask_t		oldmask;
 
 	if (num_cache_leaves == 0)
@@ -524,19 +529,26 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
 		goto out;
 
 	/* Do cpuid and store the results */
-	retval = 0;
 	for (j = 0; j < num_cache_leaves; j++) {
 		this_leaf = CPUID4_INFO_IDX(cpu, j);
 		retval = cpuid4_cache_lookup(j, this_leaf);
-		if (unlikely(retval < 0))
+		if (unlikely(retval < 0)) {
+			int i;
+
+			for (i = 0; i < j; i++)
+				cache_remove_shared_cpu_map(cpu, i);
 			break;
+		}
 		cache_shared_cpu_map_setup(cpu, j);
 	}
 	set_cpus_allowed(current, oldmask);
 
 out:
-	if (retval)
-		free_cache_attributes(cpu);
+	if (retval) {
+		kfree(cpuid4_info[cpu]);
+		cpuid4_info[cpu] = NULL;
+	}
+
 	return retval;
 }
 
@@ -669,7 +681,7 @@ static struct kobj_type ktype_percpu_entry = {
 	.sysfs_ops = &sysfs_ops,
 };
 
-static void cpuid4_cache_sysfs_exit(unsigned int cpu)
+static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
 	kfree(cache_kobject[cpu]);
 	kfree(index_kobject[cpu]);
@@ -680,13 +692,14 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu)
 
 static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 {
+	int err;
 
 	if (num_cache_leaves == 0)
 		return -ENOENT;
 
-	detect_cache_attributes(cpu);
-	if (cpuid4_info[cpu] == NULL)
-		return -ENOENT;
+	err = detect_cache_attributes(cpu);
+	if (err)
+		return err;
 
 	/* Allocate all required memory */
 	cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
@@ -705,13 +718,15 @@ err_out:
 	return -ENOMEM;
 }
 
+static cpumask_t cache_dev_map = CPU_MASK_NONE;
+
 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i, j;
 	struct _index_kobject *this_object;
-	int retval = 0;
+	int retval;
 
 	retval = cpuid4_cache_sysfs_init(cpu);
 	if (unlikely(retval < 0))
@@ -721,6 +736,10 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 	kobject_set_name(cache_kobject[cpu], "%s", "cache");
 	cache_kobject[cpu]->ktype = &ktype_percpu_entry;
 	retval = kobject_register(cache_kobject[cpu]);
+	if (retval < 0) {
+		cpuid4_cache_sysfs_exit(cpu);
+		return retval;
+	}
 
 	for (i = 0; i < num_cache_leaves; i++) {
 		this_object = INDEX_KOBJECT_PTR(cpu,i);
@@ -740,6 +759,9 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 			break;
 		}
 	}
+	if (!retval)
+		cpu_set(cpu, cache_dev_map);
+
 	return retval;
 }
 
@@ -750,13 +772,14 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
 	if (cpuid4_info[cpu] == NULL)
 		return;
-	for (i = 0; i < num_cache_leaves; i++) {
-		cache_remove_shared_cpu_map(cpu, i);
+	if (!cpu_isset(cpu, cache_dev_map))
+		return;
+	cpu_clear(cpu, cache_dev_map);
+
+	for (i = 0; i < num_cache_leaves; i++)
 		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
-	}
 	kobject_unregister(cache_kobject[cpu]);
 	cpuid4_cache_sysfs_exit(cpu);
-	return;
 }
 
 static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
@@ -781,7 +804,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
 
 static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
 {
-	.notifier_call = cacheinfo_cpu_callback,
+	.notifier_call = cacheinfo_cpu_callback,
 };
 
 static int __cpuinit cache_sysfs_init(void)
@@ -791,14 +814,15 @@ static int __cpuinit cache_sysfs_init(void)
 	if (num_cache_leaves == 0)
 		return 0;
 
-	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
-
 	for_each_online_cpu(i) {
-		struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
+		int err;
+		struct sys_device *sys_dev = get_cpu_sysdev(i);
 
-		cache_add_dev(sys_dev);
+		err = cache_add_dev(sys_dev);
+		if (err)
+			return err;
 	}
-
+	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
 	return 0;
 }
 
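For reference, the unwind idiom that the detect_cache_attributes() hunk adds
(undo only the 0..j-1 setups that succeeded before the failing step) looks
like this in isolation. setup_leaf() and teardown_leaf() are illustrative
placeholders for this sketch, not functions from the kernel source.

#include <stdio.h>

#define NR_LEAVES 8

/* Simulated per-leaf setup that fails at leaf 5. */
static int setup_leaf(unsigned long j)
{
	return (j == 5) ? -1 : 0;
}

static void teardown_leaf(unsigned long j)
{
	printf("undoing leaf %lu\n", j);
}

/*
 * Mirror of the fixed loop: on the first failure, unwind only the
 * leaves 0..j-1 that were already set up, then report the error.
 */
static int setup_all(void)
{
	unsigned long i, j;

	for (j = 0; j < NR_LEAVES; j++) {
		if (setup_leaf(j) < 0) {
			for (i = 0; i < j; i++)
				teardown_leaf(i);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return setup_all() ? 1 : 0;
}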