diff options
author | David Rientjes <rientjes@google.com> | 2013-04-29 18:08:22 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-29 18:54:37 -0400 |
commit | 4edd7ceff0662afde195da6f6c43e7cbe1ed2dc4 (patch) | |
tree | 89e53f524dae229f9db490a1e091842302010c21 /mm | |
parent | fe74ebb106a5950e82222c8ea258a9c0d7c65f04 (diff) |
mm, hotplug: avoid compiling memory hotremove functions when disabled
__remove_pages() is only necessary for CONFIG_MEMORY_HOTREMOVE. PowerPC
pseries will return -EOPNOTSUPP if unsupported.
Adding an #ifdef causes several other functions it depends on to also
become unnecessary, which saves space in .text when the option is disabled
(it is disabled in most defconfigs besides powerpc, including x86).
remove_memory_block() becomes static since it is not referenced outside of
drivers/base/memory.c.
Build-tested on x86 and powerpc with CONFIG_MEMORY_HOTREMOVE both enabled
and disabled.
Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: Toshi Kani <toshi.kani@hp.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory_hotplug.c | 68 | ||||
-rw-r--r-- | mm/sparse.c | 72 |
2 files changed, 74 insertions, 66 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index c916582591eb..60f6daad1076 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -436,6 +436,40 @@ static int __meminit __add_section(int nid, struct zone *zone, | |||
436 | return register_new_memory(nid, __pfn_to_section(phys_start_pfn)); | 436 | return register_new_memory(nid, __pfn_to_section(phys_start_pfn)); |
437 | } | 437 | } |
438 | 438 | ||
439 | /* | ||
440 | * Reasonably generic function for adding memory. It is | ||
441 | * expected that archs that support memory hotplug will | ||
442 | * call this function after deciding the zone to which to | ||
443 | * add the new pages. | ||
444 | */ | ||
445 | int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn, | ||
446 | unsigned long nr_pages) | ||
447 | { | ||
448 | unsigned long i; | ||
449 | int err = 0; | ||
450 | int start_sec, end_sec; | ||
451 | /* during initialize mem_map, align hot-added range to section */ | ||
452 | start_sec = pfn_to_section_nr(phys_start_pfn); | ||
453 | end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1); | ||
454 | |||
455 | for (i = start_sec; i <= end_sec; i++) { | ||
456 | err = __add_section(nid, zone, i << PFN_SECTION_SHIFT); | ||
457 | |||
458 | /* | ||
459 | * EEXIST is finally dealt with by ioresource collision | ||
460 | * check. see add_memory() => register_memory_resource() | ||
461 | * Warning will be printed if there is collision. | ||
462 | */ | ||
463 | if (err && (err != -EEXIST)) | ||
464 | break; | ||
465 | err = 0; | ||
466 | } | ||
467 | |||
468 | return err; | ||
469 | } | ||
470 | EXPORT_SYMBOL_GPL(__add_pages); | ||
471 | |||
472 | #ifdef CONFIG_MEMORY_HOTREMOVE | ||
439 | /* find the smallest valid pfn in the range [start_pfn, end_pfn) */ | 473 | /* find the smallest valid pfn in the range [start_pfn, end_pfn) */ |
440 | static int find_smallest_section_pfn(int nid, struct zone *zone, | 474 | static int find_smallest_section_pfn(int nid, struct zone *zone, |
441 | unsigned long start_pfn, | 475 | unsigned long start_pfn, |
@@ -658,39 +692,6 @@ static int __remove_section(struct zone *zone, struct mem_section *ms) | |||
658 | return 0; | 692 | return 0; |
659 | } | 693 | } |
660 | 694 | ||
661 | /* | ||
662 | * Reasonably generic function for adding memory. It is | ||
663 | * expected that archs that support memory hotplug will | ||
664 | * call this function after deciding the zone to which to | ||
665 | * add the new pages. | ||
666 | */ | ||
667 | int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn, | ||
668 | unsigned long nr_pages) | ||
669 | { | ||
670 | unsigned long i; | ||
671 | int err = 0; | ||
672 | int start_sec, end_sec; | ||
673 | /* during initialize mem_map, align hot-added range to section */ | ||
674 | start_sec = pfn_to_section_nr(phys_start_pfn); | ||
675 | end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1); | ||
676 | |||
677 | for (i = start_sec; i <= end_sec; i++) { | ||
678 | err = __add_section(nid, zone, i << PFN_SECTION_SHIFT); | ||
679 | |||
680 | /* | ||
681 | * EEXIST is finally dealt with by ioresource collision | ||
682 | * check. see add_memory() => register_memory_resource() | ||
683 | * Warning will be printed if there is collision. | ||
684 | */ | ||
685 | if (err && (err != -EEXIST)) | ||
686 | break; | ||
687 | err = 0; | ||
688 | } | ||
689 | |||
690 | return err; | ||
691 | } | ||
692 | EXPORT_SYMBOL_GPL(__add_pages); | ||
693 | |||
694 | /** | 695 | /** |
695 | * __remove_pages() - remove sections of pages from a zone | 696 | * __remove_pages() - remove sections of pages from a zone |
696 | * @zone: zone from which pages need to be removed | 697 | * @zone: zone from which pages need to be removed |
@@ -733,6 +734,7 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, | |||
733 | return ret; | 734 | return ret; |
734 | } | 735 | } |
735 | EXPORT_SYMBOL_GPL(__remove_pages); | 736 | EXPORT_SYMBOL_GPL(__remove_pages); |
737 | #endif /* CONFIG_MEMORY_HOTREMOVE */ | ||
736 | 738 | ||
737 | int set_online_page_callback(online_page_callback_t callback) | 739 | int set_online_page_callback(online_page_callback_t callback) |
738 | { | 740 | { |
diff --git a/mm/sparse.c b/mm/sparse.c index a37be5f9050d..1c91f0d3f6ab 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -620,6 +620,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) | |||
620 | 620 | ||
621 | vmemmap_free(start, end); | 621 | vmemmap_free(start, end); |
622 | } | 622 | } |
623 | #ifdef CONFIG_MEMORY_HOTREMOVE | ||
623 | static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) | 624 | static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) |
624 | { | 625 | { |
625 | unsigned long start = (unsigned long)memmap; | 626 | unsigned long start = (unsigned long)memmap; |
@@ -627,6 +628,7 @@ static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) | |||
627 | 628 | ||
628 | vmemmap_free(start, end); | 629 | vmemmap_free(start, end); |
629 | } | 630 | } |
631 | #endif /* CONFIG_MEMORY_HOTREMOVE */ | ||
630 | #else | 632 | #else |
631 | static struct page *__kmalloc_section_memmap(unsigned long nr_pages) | 633 | static struct page *__kmalloc_section_memmap(unsigned long nr_pages) |
632 | { | 634 | { |
@@ -664,6 +666,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) | |||
664 | get_order(sizeof(struct page) * nr_pages)); | 666 | get_order(sizeof(struct page) * nr_pages)); |
665 | } | 667 | } |
666 | 668 | ||
669 | #ifdef CONFIG_MEMORY_HOTREMOVE | ||
667 | static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) | 670 | static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) |
668 | { | 671 | { |
669 | unsigned long maps_section_nr, removing_section_nr, i; | 672 | unsigned long maps_section_nr, removing_section_nr, i; |
@@ -690,40 +693,9 @@ static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) | |||
690 | put_page_bootmem(page); | 693 | put_page_bootmem(page); |
691 | } | 694 | } |
692 | } | 695 | } |
696 | #endif /* CONFIG_MEMORY_HOTREMOVE */ | ||
693 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ | 697 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ |
694 | 698 | ||
695 | static void free_section_usemap(struct page *memmap, unsigned long *usemap) | ||
696 | { | ||
697 | struct page *usemap_page; | ||
698 | unsigned long nr_pages; | ||
699 | |||
700 | if (!usemap) | ||
701 | return; | ||
702 | |||
703 | usemap_page = virt_to_page(usemap); | ||
704 | /* | ||
705 | * Check to see if allocation came from hot-plug-add | ||
706 | */ | ||
707 | if (PageSlab(usemap_page) || PageCompound(usemap_page)) { | ||
708 | kfree(usemap); | ||
709 | if (memmap) | ||
710 | __kfree_section_memmap(memmap, PAGES_PER_SECTION); | ||
711 | return; | ||
712 | } | ||
713 | |||
714 | /* | ||
715 | * The usemap came from bootmem. This is packed with other usemaps | ||
716 | * on the section which has pgdat at boot time. Just keep it as is now. | ||
717 | */ | ||
718 | |||
719 | if (memmap) { | ||
720 | nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) | ||
721 | >> PAGE_SHIFT; | ||
722 | |||
723 | free_map_bootmem(memmap, nr_pages); | ||
724 | } | ||
725 | } | ||
726 | |||
727 | /* | 699 | /* |
728 | * returns the number of sections whose mem_maps were properly | 700 | * returns the number of sections whose mem_maps were properly |
729 | * set. If this is <=0, then that means that the passed-in | 701 | * set. If this is <=0, then that means that the passed-in |
@@ -800,6 +772,39 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages) | |||
800 | } | 772 | } |
801 | #endif | 773 | #endif |
802 | 774 | ||
775 | #ifdef CONFIG_MEMORY_HOTREMOVE | ||
776 | static void free_section_usemap(struct page *memmap, unsigned long *usemap) | ||
777 | { | ||
778 | struct page *usemap_page; | ||
779 | unsigned long nr_pages; | ||
780 | |||
781 | if (!usemap) | ||
782 | return; | ||
783 | |||
784 | usemap_page = virt_to_page(usemap); | ||
785 | /* | ||
786 | * Check to see if allocation came from hot-plug-add | ||
787 | */ | ||
788 | if (PageSlab(usemap_page) || PageCompound(usemap_page)) { | ||
789 | kfree(usemap); | ||
790 | if (memmap) | ||
791 | __kfree_section_memmap(memmap, PAGES_PER_SECTION); | ||
792 | return; | ||
793 | } | ||
794 | |||
795 | /* | ||
796 | * The usemap came from bootmem. This is packed with other usemaps | ||
797 | * on the section which has pgdat at boot time. Just keep it as is now. | ||
798 | */ | ||
799 | |||
800 | if (memmap) { | ||
801 | nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) | ||
802 | >> PAGE_SHIFT; | ||
803 | |||
804 | free_map_bootmem(memmap, nr_pages); | ||
805 | } | ||
806 | } | ||
807 | |||
803 | void sparse_remove_one_section(struct zone *zone, struct mem_section *ms) | 808 | void sparse_remove_one_section(struct zone *zone, struct mem_section *ms) |
804 | { | 809 | { |
805 | struct page *memmap = NULL; | 810 | struct page *memmap = NULL; |
@@ -819,4 +824,5 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms) | |||
819 | clear_hwpoisoned_pages(memmap, PAGES_PER_SECTION); | 824 | clear_hwpoisoned_pages(memmap, PAGES_PER_SECTION); |
820 | free_section_usemap(memmap, usemap); | 825 | free_section_usemap(memmap, usemap); |
821 | } | 826 | } |
822 | #endif | 827 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
828 | #endif /* CONFIG_MEMORY_HOTPLUG */ | ||