aboutsummaryrefslogtreecommitdiffstats
path: root/mm/sparse.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-02-06 13:41:33 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2018-02-06 13:41:33 -0500
commit3ff1b28caaff1d66d2be7e6eb7c56f78e9046fbb (patch)
tree32d75a6db7f4985d37a9cfb7f1a1270963cfa404 /mm/sparse.c
parent105cf3c8c6264dce4bcdab877feb8037bc4109b1 (diff)
parentee95f4059a833839bf52972191b2d4c3d3cec552 (diff)
Merge tag 'libnvdimm-for-4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm updates from Ross Zwisler: - Require struct page by default for filesystem DAX to remove a number of surprising failure cases. This includes failures with direct I/O, gdb and fork(2). - Add support for the new Platform Capabilities Structure added to the NFIT in ACPI 6.2a. This new table tells us whether the platform supports flushing of CPU and memory controller caches on unexpected power loss events. - Revamp vmem_altmap and dev_pagemap handling to clean up code and better support future PCI P2P uses. - Deprecate the ND_IOCTL_SMART_THRESHOLD command whose payload has become out-of-sync with recent versions of the NVDIMM_FAMILY_INTEL spec, and instead rely on the generic ND_CMD_CALL approach used by the two other IOCTL families, NVDIMM_FAMILY_{HPE,MSFT}. - Enhance nfit_test so we can test some of the new things added in version 1.6 of the DSM specification. This includes testing firmware download and simulating the Last Shutdown State (LSS) status. * tag 'libnvdimm-for-4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (37 commits) libnvdimm, namespace: remove redundant initialization of 'nd_mapping' acpi, nfit: fix register dimm error handling libnvdimm, namespace: make min namespace size 4K tools/testing/nvdimm: force nfit_test to depend on instrumented modules libnvdimm/nfit_test: adding support for unit testing enable LSS status libnvdimm/nfit_test: add firmware download emulation nfit-test: Add platform cap support from ACPI 6.2a to test libnvdimm: expose platform persistence attribute for nd_region acpi: nfit: add persistent memory control flag for nd_region acpi: nfit: Add support for detect platform CPU cache flush on power loss device-dax: Fix trailing semicolon libnvdimm, btt: fix uninitialized err_lock dax: require 'struct page' by default for filesystem dax ext2: auto disable dax instead of failing mount ext4: auto disable dax instead of failing mount mm, dax: introduce pfn_t_special() mm: Fix 
devm_memremap_pages() collision handling mm: Fix memory size alignment in devm_memremap_pages_release() memremap: merge find_dev_pagemap into get_dev_pagemap memremap: change devm_memremap_pages interface to use struct dev_pagemap ...
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--mm/sparse.c43
1 file changed, 25 insertions, 18 deletions
diff --git a/mm/sparse.c b/mm/sparse.c
index 6b8b5e91ceef..7af5e7a92528 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -421,7 +421,8 @@ static void __init sparse_early_usemaps_alloc_node(void *data,
421} 421}
422 422
423#ifndef CONFIG_SPARSEMEM_VMEMMAP 423#ifndef CONFIG_SPARSEMEM_VMEMMAP
424struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid) 424struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
425 struct vmem_altmap *altmap)
425{ 426{
426 struct page *map; 427 struct page *map;
427 unsigned long size; 428 unsigned long size;
@@ -476,7 +477,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
476 477
477 if (!present_section_nr(pnum)) 478 if (!present_section_nr(pnum))
478 continue; 479 continue;
479 map_map[pnum] = sparse_mem_map_populate(pnum, nodeid); 480 map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
480 if (map_map[pnum]) 481 if (map_map[pnum])
481 continue; 482 continue;
482 ms = __nr_to_section(pnum); 483 ms = __nr_to_section(pnum);
@@ -504,7 +505,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
504 struct mem_section *ms = __nr_to_section(pnum); 505 struct mem_section *ms = __nr_to_section(pnum);
505 int nid = sparse_early_nid(ms); 506 int nid = sparse_early_nid(ms);
506 507
507 map = sparse_mem_map_populate(pnum, nid); 508 map = sparse_mem_map_populate(pnum, nid, NULL);
508 if (map) 509 if (map)
509 return map; 510 return map;
510 511
@@ -682,17 +683,19 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
682#endif 683#endif
683 684
684#ifdef CONFIG_SPARSEMEM_VMEMMAP 685#ifdef CONFIG_SPARSEMEM_VMEMMAP
685static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid) 686static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
687 struct vmem_altmap *altmap)
686{ 688{
687 /* This will make the necessary allocations eventually. */ 689 /* This will make the necessary allocations eventually. */
688 return sparse_mem_map_populate(pnum, nid); 690 return sparse_mem_map_populate(pnum, nid, altmap);
689} 691}
690static void __kfree_section_memmap(struct page *memmap) 692static void __kfree_section_memmap(struct page *memmap,
693 struct vmem_altmap *altmap)
691{ 694{
692 unsigned long start = (unsigned long)memmap; 695 unsigned long start = (unsigned long)memmap;
693 unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION); 696 unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
694 697
695 vmemmap_free(start, end); 698 vmemmap_free(start, end, altmap);
696} 699}
697#ifdef CONFIG_MEMORY_HOTREMOVE 700#ifdef CONFIG_MEMORY_HOTREMOVE
698static void free_map_bootmem(struct page *memmap) 701static void free_map_bootmem(struct page *memmap)
@@ -700,7 +703,7 @@ static void free_map_bootmem(struct page *memmap)
700 unsigned long start = (unsigned long)memmap; 703 unsigned long start = (unsigned long)memmap;
701 unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION); 704 unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
702 705
703 vmemmap_free(start, end); 706 vmemmap_free(start, end, NULL);
704} 707}
705#endif /* CONFIG_MEMORY_HOTREMOVE */ 708#endif /* CONFIG_MEMORY_HOTREMOVE */
706#else 709#else
@@ -725,12 +728,14 @@ got_map_ptr:
725 return ret; 728 return ret;
726} 729}
727 730
728static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid) 731static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
732 struct vmem_altmap *altmap)
729{ 733{
730 return __kmalloc_section_memmap(); 734 return __kmalloc_section_memmap();
731} 735}
732 736
733static void __kfree_section_memmap(struct page *memmap) 737static void __kfree_section_memmap(struct page *memmap,
738 struct vmem_altmap *altmap)
734{ 739{
735 if (is_vmalloc_addr(memmap)) 740 if (is_vmalloc_addr(memmap))
736 vfree(memmap); 741 vfree(memmap);
@@ -777,7 +782,8 @@ static void free_map_bootmem(struct page *memmap)
777 * set. If this is <=0, then that means that the passed-in 782 * set. If this is <=0, then that means that the passed-in
778 * map was not consumed and must be freed. 783 * map was not consumed and must be freed.
779 */ 784 */
780int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn) 785int __meminit sparse_add_one_section(struct pglist_data *pgdat,
786 unsigned long start_pfn, struct vmem_altmap *altmap)
781{ 787{
782 unsigned long section_nr = pfn_to_section_nr(start_pfn); 788 unsigned long section_nr = pfn_to_section_nr(start_pfn);
783 struct mem_section *ms; 789 struct mem_section *ms;
@@ -793,12 +799,12 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long st
793 ret = sparse_index_init(section_nr, pgdat->node_id); 799 ret = sparse_index_init(section_nr, pgdat->node_id);
794 if (ret < 0 && ret != -EEXIST) 800 if (ret < 0 && ret != -EEXIST)
795 return ret; 801 return ret;
796 memmap = kmalloc_section_memmap(section_nr, pgdat->node_id); 802 memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
797 if (!memmap) 803 if (!memmap)
798 return -ENOMEM; 804 return -ENOMEM;
799 usemap = __kmalloc_section_usemap(); 805 usemap = __kmalloc_section_usemap();
800 if (!usemap) { 806 if (!usemap) {
801 __kfree_section_memmap(memmap); 807 __kfree_section_memmap(memmap, altmap);
802 return -ENOMEM; 808 return -ENOMEM;
803 } 809 }
804 810
@@ -820,7 +826,7 @@ out:
820 pgdat_resize_unlock(pgdat, &flags); 826 pgdat_resize_unlock(pgdat, &flags);
821 if (ret <= 0) { 827 if (ret <= 0) {
822 kfree(usemap); 828 kfree(usemap);
823 __kfree_section_memmap(memmap); 829 __kfree_section_memmap(memmap, altmap);
824 } 830 }
825 return ret; 831 return ret;
826} 832}
@@ -847,7 +853,8 @@ static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
847} 853}
848#endif 854#endif
849 855
850static void free_section_usemap(struct page *memmap, unsigned long *usemap) 856static void free_section_usemap(struct page *memmap, unsigned long *usemap,
857 struct vmem_altmap *altmap)
851{ 858{
852 struct page *usemap_page; 859 struct page *usemap_page;
853 860
@@ -861,7 +868,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
861 if (PageSlab(usemap_page) || PageCompound(usemap_page)) { 868 if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
862 kfree(usemap); 869 kfree(usemap);
863 if (memmap) 870 if (memmap)
864 __kfree_section_memmap(memmap); 871 __kfree_section_memmap(memmap, altmap);
865 return; 872 return;
866 } 873 }
867 874
@@ -875,7 +882,7 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
875} 882}
876 883
877void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, 884void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
878 unsigned long map_offset) 885 unsigned long map_offset, struct vmem_altmap *altmap)
879{ 886{
880 struct page *memmap = NULL; 887 struct page *memmap = NULL;
881 unsigned long *usemap = NULL, flags; 888 unsigned long *usemap = NULL, flags;
@@ -893,7 +900,7 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
893 900
894 clear_hwpoisoned_pages(memmap + map_offset, 901 clear_hwpoisoned_pages(memmap + map_offset,
895 PAGES_PER_SECTION - map_offset); 902 PAGES_PER_SECTION - map_offset);
896 free_section_usemap(memmap, usemap); 903 free_section_usemap(memmap, usemap, altmap);
897} 904}
898#endif /* CONFIG_MEMORY_HOTREMOVE */ 905#endif /* CONFIG_MEMORY_HOTREMOVE */
899#endif /* CONFIG_MEMORY_HOTPLUG */ 906#endif /* CONFIG_MEMORY_HOTPLUG */