Diffstat (limited to 'mm/sparse.c')
-rw-r--r--	mm/sparse.c	43
1 file changed, 25 insertions(+), 18 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 2609aba121e8..2583174b1d62 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -417,7 +417,8 @@ static void __init sparse_early_usemaps_alloc_node(void *data,
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
+struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
+		struct vmem_altmap *altmap)
 {
 	struct page *map;
 	unsigned long size;
@@ -472,7 +473,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 
 		if (!present_section_nr(pnum))
 			continue;
-		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
 		if (map_map[pnum])
 			continue;
 		ms = __nr_to_section(pnum);
@@ -500,7 +501,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	struct mem_section *ms = __nr_to_section(pnum);
 	int nid = sparse_early_nid(ms);
 
-	map = sparse_mem_map_populate(pnum, nid);
+	map = sparse_mem_map_populate(pnum, nid, NULL);
 	if (map)
 		return map;
 
@@ -678,17 +679,19 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+		struct vmem_altmap *altmap)
 {
 	/* This will make the necessary allocations eventually. */
-	return sparse_mem_map_populate(pnum, nid);
+	return sparse_mem_map_populate(pnum, nid, altmap);
 }
-static void __kfree_section_memmap(struct page *memmap)
+static void __kfree_section_memmap(struct page *memmap,
+		struct vmem_altmap *altmap)
 {
 	unsigned long start = (unsigned long)memmap;
 	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
-	vmemmap_free(start, end);
+	vmemmap_free(start, end, altmap);
 }
 #ifdef CONFIG_MEMORY_HOTREMOVE
 static void free_map_bootmem(struct page *memmap)
@@ -696,7 +699,7 @@ static void free_map_bootmem(struct page *memmap)
 	unsigned long start = (unsigned long)memmap;
 	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
 
-	vmemmap_free(start, end);
+	vmemmap_free(start, end, NULL);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #else
@@ -721,12 +724,14 @@ got_map_ptr:
 	return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+		struct vmem_altmap *altmap)
 {
 	return __kmalloc_section_memmap();
 }
 
-static void __kfree_section_memmap(struct page *memmap)
+static void __kfree_section_memmap(struct page *memmap,
+		struct vmem_altmap *altmap)
 {
 	if (is_vmalloc_addr(memmap))
 		vfree(memmap);
@@ -773,7 +778,8 @@ static void free_map_bootmem(struct page *memmap)
  * set. If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn)
+int __meminit sparse_add_one_section(struct pglist_data *pgdat,
+		unsigned long start_pfn, struct vmem_altmap *altmap)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
 	struct mem_section *ms;
@@ -789,12 +795,12 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long st
 	ret = sparse_index_init(section_nr, pgdat->node_id);
 	if (ret < 0 && ret != -EEXIST)
 		return ret;
-	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
+	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
 	if (!memmap)
 		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
 	if (!usemap) {
-		__kfree_section_memmap(memmap);
+		__kfree_section_memmap(memmap, altmap);
 		return -ENOMEM;
 	}
 
@@ -816,7 +822,7 @@ out:
 	pgdat_resize_unlock(pgdat, &flags);
 	if (ret <= 0) {
 		kfree(usemap);
-		__kfree_section_memmap(memmap);
+		__kfree_section_memmap(memmap, altmap);
 	}
 	return ret;
 }
@@ -843,7 +849,8 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
 }
 #endif
 
-static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+static void free_section_usemap(struct page *memmap, unsigned long *usemap,
+		struct vmem_altmap *altmap)
 {
 	struct page *usemap_page;
 
@@ -857,7 +864,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
 		kfree(usemap);
 		if (memmap)
-			__kfree_section_memmap(memmap);
+			__kfree_section_memmap(memmap, altmap);
 		return;
 	}
 
@@ -871,7 +878,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
 }
 
 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
-		unsigned long map_offset)
+		unsigned long map_offset, struct vmem_altmap *altmap)
 {
 	struct page *memmap = NULL;
 	unsigned long *usemap = NULL, flags;
@@ -889,7 +896,7 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 
 	clear_hwpoisoned_pages(memmap + map_offset,
 			PAGES_PER_SECTION - map_offset);
-	free_section_usemap(memmap, usemap);
+	free_section_usemap(memmap, usemap, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 #endif /* CONFIG_MEMORY_HOTPLUG */
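
The sketch below (not part of the patch) illustrates how a memory-hotplug caller would thread the new altmap argument through section add and remove. Only the sparse_add_one_section()/sparse_remove_one_section() signatures come from the diff above; the wrapper names and the caller context are hypothetical.

/* Hypothetical caller sketch: thread a vmem_altmap through hotplug. */
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
#include <linux/mmzone.h>

static int example_add_section(struct pglist_data *pgdat,
		unsigned long start_pfn, struct vmem_altmap *altmap)
{
	/*
	 * Pass NULL to keep the pre-patch behaviour (memmap allocated
	 * normally); pass a vmem_altmap to have the memmap carved out
	 * of the hotplugged range itself.
	 */
	return sparse_add_one_section(pgdat, start_pfn, altmap);
}

static void example_remove_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap)
{
	/*
	 * The same altmap must be supplied on teardown so that
	 * __kfree_section_memmap()/vmemmap_free() release the backing
	 * pages to the place they came from.
	 */
	sparse_remove_one_section(zone, ms, map_offset, altmap);
}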