Diffstat (limited to 'mm/sparse.c')
 mm/sparse.c | 53 ++++++++++++++++++++++++-----------------------------
 1 file changed, 24 insertions(+), 29 deletions(-)
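Note: every function touched below took an nr_pages argument that callers could only ever set to one section's worth of pages, so the patch drops the parameter and lets the callees use the PAGES_PER_SECTION constant directly. For a rough sense of the sizes involved, here is a small standalone C sketch of the arithmetic; the SECTION_SIZE_BITS, PAGE_SHIFT, and sizeof(struct page) values are assumed x86_64 defaults, not something this diff itself states.

	/*
	 * Worked example (userspace, illustrative only): how big is the
	 * memmap that these functions allocate and free per section?
	 * All constants are assumed x86_64 defaults; other configs differ.
	 */
	#include <stdio.h>

	#define SECTION_SIZE_BITS  27UL               /* assumed: 128 MiB sections */
	#define PAGE_SHIFT         12UL               /* assumed: 4 KiB pages */
	#define PAGE_SIZE          (1UL << PAGE_SHIFT)
	#define PAGE_ALIGN(x)      (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
	#define PAGES_PER_SECTION  (1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))
	#define SIZEOF_STRUCT_PAGE 64UL               /* assumed typical value */

	int main(void)
	{
		/* What __kmalloc_section_memmap() now sizes via PAGES_PER_SECTION. */
		unsigned long memmap_size = SIZEOF_STRUCT_PAGE * PAGES_PER_SECTION;

		/* What free_map_bootmem() now derives instead of taking nr_pages. */
		unsigned long nr_pages = PAGE_ALIGN(memmap_size) >> PAGE_SHIFT;

		printf("PAGES_PER_SECTION      = %lu\n", PAGES_PER_SECTION);    /* 32768 */
		printf("memmap per section     = %lu KiB\n", memmap_size >> 10); /* 2048 */
		printf("memmap pages (derived) = %lu\n", nr_pages);             /* 512 */
		return 0;
	}

With those assumptions, each 128 MiB section carries a 2 MiB memmap, i.e. exactly the 512 pages that the patched free_map_bootmem() computes for itself.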
diff --git a/mm/sparse.c b/mm/sparse.c
index 4ac1d7ef548f..8cc7be0e9590 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -590,33 +590,32 @@ void __init sparse_init(void)
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-						  unsigned long nr_pages)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
 {
 	/* This will make the necessary allocations eventually. */
 	return sparse_mem_map_populate(pnum, nid);
 }
-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *memmap)
 {
 	unsigned long start = (unsigned long)memmap;
-	unsigned long end = (unsigned long)(memmap + nr_pages);
+	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
 	vmemmap_free(start, end);
 }
 #ifdef CONFIG_MEMORY_HOTREMOVE
-static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap)
 {
 	unsigned long start = (unsigned long)memmap;
-	unsigned long end = (unsigned long)(memmap + nr_pages);
+	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
 	vmemmap_free(start, end);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #else
-static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
+static struct page *__kmalloc_section_memmap(void)
 {
 	struct page *page, *ret;
-	unsigned long memmap_size = sizeof(struct page) * nr_pages;
+	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;
 
 	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
 	if (page)
@@ -634,28 +633,30 @@ got_map_ptr:
 	return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
-						  unsigned long nr_pages)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
 {
-	return __kmalloc_section_memmap(nr_pages);
+	return __kmalloc_section_memmap();
 }
 
-static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+static void __kfree_section_memmap(struct page *memmap)
 {
 	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
 	else
 		free_pages((unsigned long)memmap,
-			   get_order(sizeof(struct page) * nr_pages));
+			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
+static void free_map_bootmem(struct page *memmap)
 {
 	unsigned long maps_section_nr, removing_section_nr, i;
-	unsigned long magic;
+	unsigned long magic, nr_pages;
 	struct page *page = virt_to_page(memmap);
 
+	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+		>> PAGE_SHIFT;
+
 	for (i = 0; i < nr_pages; i++, page++) {
 		magic = (unsigned long) page->lru.next;
 
@@ -684,8 +685,7 @@ static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
 	 * set. If this is <=0, then that means that the passed-in
 	 * map was not consumed and must be freed.
 	 */
-int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
-				     int nr_pages)
+int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
 	struct pglist_data *pgdat = zone->zone_pgdat;
@@ -702,12 +702,12 @@ int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 	ret = sparse_index_init(section_nr, pgdat->node_id);
 	if (ret < 0 && ret != -EEXIST)
 		return ret;
-	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
+	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
 	if (!memmap)
 		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
 	if (!usemap) {
-		__kfree_section_memmap(memmap, nr_pages);
+		__kfree_section_memmap(memmap);
 		return -ENOMEM;
 	}
 
@@ -719,7 +719,7 @@ int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 		goto out;
 	}
 
-	memset(memmap, 0, sizeof(struct page) * nr_pages);
+	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);
 
 	ms->section_mem_map |= SECTION_MARKED_PRESENT;
 
@@ -729,7 +729,7 @@ out:
 	pgdat_resize_unlock(pgdat, &flags);
 	if (ret <= 0) {
 		kfree(usemap);
-		__kfree_section_memmap(memmap, nr_pages);
+		__kfree_section_memmap(memmap);
 	}
 	return ret;
 }
@@ -759,7 +759,6 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 {
 	struct page *usemap_page;
-	unsigned long nr_pages;
 
 	if (!usemap)
 		return;
@@ -771,7 +770,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
 		kfree(usemap);
 		if (memmap)
-			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
+			__kfree_section_memmap(memmap);
 		return;
 	}
 
@@ -780,12 +779,8 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	 * on the section which has pgdat at boot time. Just keep it as is now.
 	 */
 
-	if (memmap) {
-		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
-			>> PAGE_SHIFT;
-
-		free_map_bootmem(memmap, nr_pages);
-	}
+	if (memmap)
+		free_map_bootmem(memmap);
 }
 
 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
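On the caller side (outside this file, so not visible in this diff), call sites shrink the same way. A minimal userspace analogue of the pattern, with hypothetical stand-in names rather than the real kernel APIs:

	/*
	 * Userspace analogue of the refactor (hypothetical names, not
	 * kernel code): a parameter whose value is the same constant at
	 * every call site moves into the callee.
	 */
	#include <stdio.h>

	#define ITEMS_PER_SECTION 32768UL  /* plays the role of PAGES_PER_SECTION */

	/* Before: callers must thread the constant through. */
	static void release_section_old(void *map, unsigned long nr_items)
	{
		printf("old: releasing %lu items at %p\n", nr_items, map);
	}

	/* After: the callee uses the constant directly, so a mismatched
	 * or stale count can no longer be passed in by mistake. */
	static void release_section_new(void *map)
	{
		printf("new: releasing %lu items at %p\n", ITEMS_PER_SECTION, map);
	}

	int main(void)
	{
		char map[1];

		release_section_old((void *)map, ITEMS_PER_SECTION); /* every caller repeated this */
		release_section_new((void *)map);                    /* constant now implied */
		return 0;
	}

The design point is the one the diff itself makes: when an argument can only ever be PAGES_PER_SECTION, keeping it in the signature just invites inconsistent callers.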