summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2019-06-26 08:27:13 -0400
committerJason Gunthorpe <jgg@mellanox.com>2019-07-02 13:32:44 -0400
commit514caf23a70fd697fa2ece238b2cd8dcc73fb16f (patch)
tree8e41e9dd323e5117baf1bdc044c2ce035c740483
parent80a72d0af05ae97a8b106c172e431072ba587492 (diff)
memremap: replace the altmap_valid field with a PGMAP_ALTMAP_VALID flag
Add a flags field to struct dev_pagemap to replace the altmap_valid boolean to be a little more extensible. Also add a pgmap_altmap() helper to find the optional altmap and clean up the code using the altmap using it. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Ira Weiny <ira.weiny@intel.com> Reviewed-by: Dan Williams <dan.j.williams@intel.com> Tested-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
-rw-r--r--arch/powerpc/mm/mem.c10
-rw-r--r--arch/x86/mm/init_64.c8
-rw-r--r--drivers/nvdimm/pfn_devs.c3
-rw-r--r--drivers/nvdimm/pmem.c1
-rw-r--r--include/linux/memremap.h12
-rw-r--r--kernel/memremap.c26
-rw-r--r--mm/hmm.c1
-rw-r--r--mm/memory_hotplug.c6
-rw-r--r--mm/page_alloc.c5
9 files changed, 29 insertions, 43 deletions
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 2540d3b2588c..a2923c5c1982 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -131,17 +131,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
131{ 131{
132 unsigned long start_pfn = start >> PAGE_SHIFT; 132 unsigned long start_pfn = start >> PAGE_SHIFT;
133 unsigned long nr_pages = size >> PAGE_SHIFT; 133 unsigned long nr_pages = size >> PAGE_SHIFT;
134 struct page *page; 134 struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
135 int ret; 135 int ret;
136 136
137 /*
138 * If we have an altmap then we need to skip over any reserved PFNs
139 * when querying the zone.
140 */
141 page = pfn_to_page(start_pfn);
142 if (altmap)
143 page += vmem_altmap_offset(altmap);
144
145 __remove_pages(page_zone(page), start_pfn, nr_pages, altmap); 137 __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
146 138
147 /* Remove htab bolted mappings for this section of memory */ 139 /* Remove htab bolted mappings for this section of memory */
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 0f01c7b1d217..08bbf648827b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1213,13 +1213,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
1213{ 1213{
1214 unsigned long start_pfn = start >> PAGE_SHIFT; 1214 unsigned long start_pfn = start >> PAGE_SHIFT;
1215 unsigned long nr_pages = size >> PAGE_SHIFT; 1215 unsigned long nr_pages = size >> PAGE_SHIFT;
1216 struct page *page = pfn_to_page(start_pfn); 1216 struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
1217 struct zone *zone; 1217 struct zone *zone = page_zone(page);
1218 1218
1219 /* With altmap the first mapped page is offset from @start */
1220 if (altmap)
1221 page += vmem_altmap_offset(altmap);
1222 zone = page_zone(page);
1223 __remove_pages(zone, start_pfn, nr_pages, altmap); 1219 __remove_pages(zone, start_pfn, nr_pages, altmap);
1224 kernel_physical_mapping_remove(start, start + size); 1220 kernel_physical_mapping_remove(start, start + size);
1225} 1221}
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 0f81fc56bbfd..55fb6b7433ed 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -622,7 +622,6 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
622 if (offset < reserve) 622 if (offset < reserve)
623 return -EINVAL; 623 return -EINVAL;
624 nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns); 624 nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
625 pgmap->altmap_valid = false;
626 } else if (nd_pfn->mode == PFN_MODE_PMEM) { 625 } else if (nd_pfn->mode == PFN_MODE_PMEM) {
627 nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res) 626 nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
628 - offset) / PAGE_SIZE); 627 - offset) / PAGE_SIZE);
@@ -634,7 +633,7 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
634 memcpy(altmap, &__altmap, sizeof(*altmap)); 633 memcpy(altmap, &__altmap, sizeof(*altmap));
635 altmap->free = PHYS_PFN(offset - reserve); 634 altmap->free = PHYS_PFN(offset - reserve);
636 altmap->alloc = 0; 635 altmap->alloc = 0;
637 pgmap->altmap_valid = true; 636 pgmap->flags |= PGMAP_ALTMAP_VALID;
638 } else 637 } else
639 return -ENXIO; 638 return -ENXIO;
640 639
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 093408ce40ad..e7d8cc9f41e8 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -412,7 +412,6 @@ static int pmem_attach_disk(struct device *dev,
412 bb_res.start += pmem->data_offset; 412 bb_res.start += pmem->data_offset;
413 } else if (pmem_should_map_pages(dev)) { 413 } else if (pmem_should_map_pages(dev)) {
414 memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res)); 414 memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
415 pmem->pgmap.altmap_valid = false;
416 pmem->pgmap.type = MEMORY_DEVICE_FS_DAX; 415 pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
417 pmem->pgmap.ops = &fsdax_pagemap_ops; 416 pmem->pgmap.ops = &fsdax_pagemap_ops;
418 addr = devm_memremap_pages(dev, &pmem->pgmap); 417 addr = devm_memremap_pages(dev, &pmem->pgmap);
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 336eca601dad..e25685b878e9 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -88,6 +88,8 @@ struct dev_pagemap_ops {
88 vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf); 88 vm_fault_t (*migrate_to_ram)(struct vm_fault *vmf);
89}; 89};
90 90
91#define PGMAP_ALTMAP_VALID (1 << 0)
92
91/** 93/**
92 * struct dev_pagemap - metadata for ZONE_DEVICE mappings 94 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
93 * @altmap: pre-allocated/reserved memory for vmemmap allocations 95 * @altmap: pre-allocated/reserved memory for vmemmap allocations
@@ -96,19 +98,27 @@ struct dev_pagemap_ops {
96 * @dev: host device of the mapping for debug 98 * @dev: host device of the mapping for debug
97 * @data: private data pointer for page_free() 99 * @data: private data pointer for page_free()
98 * @type: memory type: see MEMORY_* in memory_hotplug.h 100 * @type: memory type: see MEMORY_* in memory_hotplug.h
101 * @flags: PGMAP_* flags to specify detailed behavior
99 * @ops: method table 102 * @ops: method table
100 */ 103 */
101struct dev_pagemap { 104struct dev_pagemap {
102 struct vmem_altmap altmap; 105 struct vmem_altmap altmap;
103 bool altmap_valid;
104 struct resource res; 106 struct resource res;
105 struct percpu_ref *ref; 107 struct percpu_ref *ref;
106 struct device *dev; 108 struct device *dev;
107 enum memory_type type; 109 enum memory_type type;
110 unsigned int flags;
108 u64 pci_p2pdma_bus_offset; 111 u64 pci_p2pdma_bus_offset;
109 const struct dev_pagemap_ops *ops; 112 const struct dev_pagemap_ops *ops;
110}; 113};
111 114
115static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
116{
117 if (pgmap->flags & PGMAP_ALTMAP_VALID)
118 return &pgmap->altmap;
119 return NULL;
120}
121
112#ifdef CONFIG_ZONE_DEVICE 122#ifdef CONFIG_ZONE_DEVICE
113void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); 123void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
114void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap); 124void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap);
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 6c3dbb692037..eee490e7d7e1 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -54,14 +54,8 @@ static void pgmap_array_delete(struct resource *res)
54 54
55static unsigned long pfn_first(struct dev_pagemap *pgmap) 55static unsigned long pfn_first(struct dev_pagemap *pgmap)
56{ 56{
57 const struct resource *res = &pgmap->res; 57 return (pgmap->res.start >> PAGE_SHIFT) +
58 struct vmem_altmap *altmap = &pgmap->altmap; 58 vmem_altmap_offset(pgmap_altmap(pgmap));
59 unsigned long pfn;
60
61 pfn = res->start >> PAGE_SHIFT;
62 if (pgmap->altmap_valid)
63 pfn += vmem_altmap_offset(altmap);
64 return pfn;
65} 59}
66 60
67static unsigned long pfn_end(struct dev_pagemap *pgmap) 61static unsigned long pfn_end(struct dev_pagemap *pgmap)
@@ -109,7 +103,7 @@ static void devm_memremap_pages_release(void *data)
109 align_size >> PAGE_SHIFT, NULL); 103 align_size >> PAGE_SHIFT, NULL);
110 } else { 104 } else {
111 arch_remove_memory(nid, align_start, align_size, 105 arch_remove_memory(nid, align_start, align_size,
112 pgmap->altmap_valid ? &pgmap->altmap : NULL); 106 pgmap_altmap(pgmap));
113 kasan_remove_zero_shadow(__va(align_start), align_size); 107 kasan_remove_zero_shadow(__va(align_start), align_size);
114 } 108 }
115 mem_hotplug_done(); 109 mem_hotplug_done();
@@ -129,8 +123,8 @@ static void devm_memremap_pages_release(void *data)
129 * 1/ At a minimum the res, ref and type and ops members of @pgmap must be 123 * 1/ At a minimum the res, ref and type and ops members of @pgmap must be
130 * initialized by the caller before passing it to this function 124 * initialized by the caller before passing it to this function
131 * 125 *
132 * 2/ The altmap field may optionally be initialized, in which case altmap_valid 126 * 2/ The altmap field may optionally be initialized, in which case
133 * must be set to true 127 * PGMAP_ALTMAP_VALID must be set in pgmap->flags.
134 * 128 *
135 * 3/ pgmap->ref must be 'live' on entry and will be killed and reaped 129 * 3/ pgmap->ref must be 'live' on entry and will be killed and reaped
136 * at devm_memremap_pages_release() time, or if this routine fails. 130 * at devm_memremap_pages_release() time, or if this routine fails.
@@ -142,15 +136,13 @@ static void devm_memremap_pages_release(void *data)
142void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap) 136void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
143{ 137{
144 resource_size_t align_start, align_size, align_end; 138 resource_size_t align_start, align_size, align_end;
145 struct vmem_altmap *altmap = pgmap->altmap_valid ?
146 &pgmap->altmap : NULL;
147 struct resource *res = &pgmap->res; 139 struct resource *res = &pgmap->res;
148 struct dev_pagemap *conflict_pgmap; 140 struct dev_pagemap *conflict_pgmap;
149 struct mhp_restrictions restrictions = { 141 struct mhp_restrictions restrictions = {
150 /* 142 /*
151 * We do not want any optional features only our own memmap 143 * We do not want any optional features only our own memmap
152 */ 144 */
153 .altmap = altmap, 145 .altmap = pgmap_altmap(pgmap),
154 }; 146 };
155 pgprot_t pgprot = PAGE_KERNEL; 147 pgprot_t pgprot = PAGE_KERNEL;
156 int error, nid, is_ram; 148 int error, nid, is_ram;
@@ -274,7 +266,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
274 266
275 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; 267 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
276 move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT, 268 move_pfn_range_to_zone(zone, align_start >> PAGE_SHIFT,
277 align_size >> PAGE_SHIFT, altmap); 269 align_size >> PAGE_SHIFT, pgmap_altmap(pgmap));
278 } 270 }
279 271
280 mem_hotplug_done(); 272 mem_hotplug_done();
@@ -319,7 +311,9 @@ EXPORT_SYMBOL_GPL(devm_memunmap_pages);
319unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) 311unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
320{ 312{
321 /* number of pfns from base where pfn_to_page() is valid */ 313 /* number of pfns from base where pfn_to_page() is valid */
322 return altmap->reserve + altmap->free; 314 if (altmap)
315 return altmap->reserve + altmap->free;
316 return 0;
323} 317}
324 318
325void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns) 319void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
diff --git a/mm/hmm.c b/mm/hmm.c
index 36e25cdbdac1..e4470462298f 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -1442,7 +1442,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
1442 devmem->pagemap.type = MEMORY_DEVICE_PRIVATE; 1442 devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
1443 devmem->pagemap.res = *devmem->resource; 1443 devmem->pagemap.res = *devmem->resource;
1444 devmem->pagemap.ops = &hmm_pagemap_ops; 1444 devmem->pagemap.ops = &hmm_pagemap_ops;
1445 devmem->pagemap.altmap_valid = false;
1446 devmem->pagemap.ref = &devmem->ref; 1445 devmem->pagemap.ref = &devmem->ref;
1447 1446
1448 result = devm_memremap_pages(devmem->device, &devmem->pagemap); 1447 result = devm_memremap_pages(devmem->device, &devmem->pagemap);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e096c987d261..6166ba5a15f3 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -557,10 +557,8 @@ void __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
557 int sections_to_remove; 557 int sections_to_remove;
558 558
559 /* In the ZONE_DEVICE case device driver owns the memory region */ 559 /* In the ZONE_DEVICE case device driver owns the memory region */
560 if (is_dev_zone(zone)) { 560 if (is_dev_zone(zone))
561 if (altmap) 561 map_offset = vmem_altmap_offset(altmap);
562 map_offset = vmem_altmap_offset(altmap);
563 }
564 562
565 clear_zone_contiguous(zone); 563 clear_zone_contiguous(zone);
566 564
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d66bc8abe0af..17a39d40a556 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5853,6 +5853,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
5853{ 5853{
5854 unsigned long pfn, end_pfn = start_pfn + size; 5854 unsigned long pfn, end_pfn = start_pfn + size;
5855 struct pglist_data *pgdat = zone->zone_pgdat; 5855 struct pglist_data *pgdat = zone->zone_pgdat;
5856 struct vmem_altmap *altmap = pgmap_altmap(pgmap);
5856 unsigned long zone_idx = zone_idx(zone); 5857 unsigned long zone_idx = zone_idx(zone);
5857 unsigned long start = jiffies; 5858 unsigned long start = jiffies;
5858 int nid = pgdat->node_id; 5859 int nid = pgdat->node_id;
@@ -5865,9 +5866,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
5865 * of the pages reserved for the memmap, so we can just jump to 5866 * of the pages reserved for the memmap, so we can just jump to
5866 * the end of that region and start processing the device pages. 5867 * the end of that region and start processing the device pages.
5867 */ 5868 */
5868 if (pgmap->altmap_valid) { 5869 if (altmap) {
5869 struct vmem_altmap *altmap = &pgmap->altmap;
5870
5871 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 5870 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
5872 size = end_pfn - start_pfn; 5871 size = end_pfn - start_pfn;
5873 } 5872 }