aboutsummaryrefslogtreecommitdiffstats
path: root/mm/sparse-vmemmap.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-02-06 13:41:33 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2018-02-06 13:41:33 -0500
commit3ff1b28caaff1d66d2be7e6eb7c56f78e9046fbb (patch)
tree32d75a6db7f4985d37a9cfb7f1a1270963cfa404 /mm/sparse-vmemmap.c
parent105cf3c8c6264dce4bcdab877feb8037bc4109b1 (diff)
parentee95f4059a833839bf52972191b2d4c3d3cec552 (diff)
Merge tag 'libnvdimm-for-4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm updates from Ross Zwisler: - Require struct page by default for filesystem DAX to remove a number of surprising failure cases. This includes failures with direct I/O, gdb and fork(2). - Add support for the new Platform Capabilities Structure added to the NFIT in ACPI 6.2a. This new table tells us whether the platform supports flushing of CPU and memory controller caches on unexpected power loss events. - Revamp vmem_altmap and dev_pagemap handling to clean up code and better support future PCI P2P uses. - Deprecate the ND_IOCTL_SMART_THRESHOLD command whose payload has become out-of-sync with recent versions of the NVDIMM_FAMILY_INTEL spec, and instead rely on the generic ND_CMD_CALL approach used by the two other IOCTL families, NVDIMM_FAMILY_{HPE,MSFT}. - Enhance nfit_test so we can test some of the new things added in version 1.6 of the DSM specification. This includes testing firmware download and simulating the Last Shutdown State (LSS) status. * tag 'libnvdimm-for-4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm: (37 commits) libnvdimm, namespace: remove redundant initialization of 'nd_mapping' acpi, nfit: fix register dimm error handling libnvdimm, namespace: make min namespace size 4K tools/testing/nvdimm: force nfit_test to depend on instrumented modules libnvdimm/nfit_test: adding support for unit testing enable LSS status libnvdimm/nfit_test: add firmware download emulation nfit-test: Add platform cap support from ACPI 6.2a to test libnvdimm: expose platform persistence attribute for nd_region acpi: nfit: add persistent memory control flag for nd_region acpi: nfit: Add support for detect platform CPU cache flush on power loss device-dax: Fix trailing semicolon libnvdimm, btt: fix uninitialized err_lock dax: require 'struct page' by default for filesystem dax ext2: auto disable dax instead of failing mount ext4: auto disable dax instead of failing mount mm, dax: introduce pfn_t_special() mm: Fix devm_memremap_pages() collision handling mm: Fix memory size alignment in devm_memremap_pages_release() memremap: merge find_dev_pagemap into get_dev_pagemap memremap: change devm_memremap_pages interface to use struct dev_pagemap ...
Diffstat (limited to 'mm/sparse-vmemmap.c')
-rw-r--r--mm/sparse-vmemmap.c67
1 files changed, 23 insertions, 44 deletions
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 17acf01791fa..bd0276d5f66b 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -74,7 +74,7 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
74} 74}
75 75
76/* need to make sure size is all the same during early stage */ 76/* need to make sure size is all the same during early stage */
77static void * __meminit alloc_block_buf(unsigned long size, int node) 77void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
78{ 78{
79 void *ptr; 79 void *ptr;
80 80
@@ -107,33 +107,16 @@ static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
107} 107}
108 108
109/** 109/**
110 * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation 110 * altmap_alloc_block_buf - allocate pages from the device page map
111 * @altmap - reserved page pool for the allocation 111 * @altmap: device page map
112 * @nr_pfns - size (in pages) of the allocation 112 * @size: size (in bytes) of the allocation
113 * 113 *
114 * Allocations are aligned to the size of the request 114 * Allocations are aligned to the size of the request.
115 */ 115 */
116static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap, 116void * __meminit altmap_alloc_block_buf(unsigned long size,
117 unsigned long nr_pfns)
118{
119 unsigned long pfn = vmem_altmap_next_pfn(altmap);
120 unsigned long nr_align;
121
122 nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
123 nr_align = ALIGN(pfn, nr_align) - pfn;
124
125 if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
126 return ULONG_MAX;
127 altmap->alloc += nr_pfns;
128 altmap->align += nr_align;
129 return pfn + nr_align;
130}
131
132static void * __meminit altmap_alloc_block_buf(unsigned long size,
133 struct vmem_altmap *altmap) 117 struct vmem_altmap *altmap)
134{ 118{
135 unsigned long pfn, nr_pfns; 119 unsigned long pfn, nr_pfns, nr_align;
136 void *ptr;
137 120
138 if (size & ~PAGE_MASK) { 121 if (size & ~PAGE_MASK) {
139 pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n", 122 pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
@@ -141,25 +124,20 @@ static void * __meminit altmap_alloc_block_buf(unsigned long size,
141 return NULL; 124 return NULL;
142 } 125 }
143 126
127 pfn = vmem_altmap_next_pfn(altmap);
144 nr_pfns = size >> PAGE_SHIFT; 128 nr_pfns = size >> PAGE_SHIFT;
145 pfn = vmem_altmap_alloc(altmap, nr_pfns); 129 nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
146 if (pfn < ULONG_MAX) 130 nr_align = ALIGN(pfn, nr_align) - pfn;
147 ptr = __va(__pfn_to_phys(pfn)); 131 if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
148 else 132 return NULL;
149 ptr = NULL;
150 pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
151 __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
152 133
153 return ptr; 134 altmap->alloc += nr_pfns;
154} 135 altmap->align += nr_align;
136 pfn += nr_align;
155 137
156/* need to make sure size is all the same during early stage */ 138 pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
157void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node, 139 __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
158 struct vmem_altmap *altmap) 140 return __va(__pfn_to_phys(pfn));
159{
160 if (altmap)
161 return altmap_alloc_block_buf(size, altmap);
162 return alloc_block_buf(size, node);
163} 141}
164 142
165void __meminit vmemmap_verify(pte_t *pte, int node, 143void __meminit vmemmap_verify(pte_t *pte, int node,
@@ -178,7 +156,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
178 pte_t *pte = pte_offset_kernel(pmd, addr); 156 pte_t *pte = pte_offset_kernel(pmd, addr);
179 if (pte_none(*pte)) { 157 if (pte_none(*pte)) {
180 pte_t entry; 158 pte_t entry;
181 void *p = alloc_block_buf(PAGE_SIZE, node); 159 void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
182 if (!p) 160 if (!p)
183 return NULL; 161 return NULL;
184 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL); 162 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
@@ -278,7 +256,8 @@ int __meminit vmemmap_populate_basepages(unsigned long start,
278 return 0; 256 return 0;
279} 257}
280 258
281struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid) 259struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
260 struct vmem_altmap *altmap)
282{ 261{
283 unsigned long start; 262 unsigned long start;
284 unsigned long end; 263 unsigned long end;
@@ -288,7 +267,7 @@ struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
288 start = (unsigned long)map; 267 start = (unsigned long)map;
289 end = (unsigned long)(map + PAGES_PER_SECTION); 268 end = (unsigned long)(map + PAGES_PER_SECTION);
290 269
291 if (vmemmap_populate(start, end, nid)) 270 if (vmemmap_populate(start, end, nid, altmap))
292 return NULL; 271 return NULL;
293 272
294 return map; 273 return map;
@@ -318,7 +297,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
318 if (!present_section_nr(pnum)) 297 if (!present_section_nr(pnum))
319 continue; 298 continue;
320 299
321 map_map[pnum] = sparse_mem_map_populate(pnum, nodeid); 300 map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
322 if (map_map[pnum]) 301 if (map_map[pnum])
323 continue; 302 continue;
324 ms = __nr_to_section(pnum); 303 ms = __nr_to_section(pnum);