author    Christoph Hellwig <hch@lst.de>          2017-12-29 02:53:59 -0500
committer Dan Williams <dan.j.williams@intel.com> 2018-01-08 14:46:23 -0500
commit    eb8045335c70ef8b272d2888a225b81344423139
tree      dfcf7d1f4453dfa4ffe31027390d64728052c475
parent    a8fc357b2875da8732c91eb085862a0648d82767
mm: merge vmem_altmap_alloc into altmap_alloc_block_buf
There is no clear separation between the two, so merge them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
 mm/sparse-vmemmap.c | 45 ++++++++++++++++-----------------------------
 1 file changed, 16 insertions(+), 29 deletions(-)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d012c9e2811b..bd0276d5f66b 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -107,33 +107,16 @@ static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
 }
 
 /**
- * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
- * @altmap - reserved page pool for the allocation
- * @nr_pfns - size (in pages) of the allocation
+ * altmap_alloc_block_buf - allocate pages from the device page map
+ * @altmap: device page map
+ * @size: size (in bytes) of the allocation
  *
- * Allocations are aligned to the size of the request
+ * Allocations are aligned to the size of the request.
  */
-static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
-		unsigned long nr_pfns)
-{
-	unsigned long pfn = vmem_altmap_next_pfn(altmap);
-	unsigned long nr_align;
-
-	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
-	nr_align = ALIGN(pfn, nr_align) - pfn;
-
-	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
-		return ULONG_MAX;
-	altmap->alloc += nr_pfns;
-	altmap->align += nr_align;
-	return pfn + nr_align;
-}
-
 void * __meminit altmap_alloc_block_buf(unsigned long size,
 		struct vmem_altmap *altmap)
 {
-	unsigned long pfn, nr_pfns;
-	void *ptr;
+	unsigned long pfn, nr_pfns, nr_align;
 
 	if (size & ~PAGE_MASK) {
 		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
@@ -141,16 +124,20 @@ void * __meminit altmap_alloc_block_buf(unsigned long size,
 		return NULL;
 	}
 
+	pfn = vmem_altmap_next_pfn(altmap);
 	nr_pfns = size >> PAGE_SHIFT;
-	pfn = vmem_altmap_alloc(altmap, nr_pfns);
-	if (pfn < ULONG_MAX)
-		ptr = __va(__pfn_to_phys(pfn));
-	else
-		ptr = NULL;
+	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
+	nr_align = ALIGN(pfn, nr_align) - pfn;
+	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
+		return NULL;
+
+	altmap->alloc += nr_pfns;
+	altmap->align += nr_align;
+	pfn += nr_align;
+
 	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
 		__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
-
-	return ptr;
+	return __va(__pfn_to_phys(pfn));
 }
 
 void __meminit vmemmap_verify(pte_t *pte, int node,
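
The alignment rule kept by the merged function ("allocations are aligned to the size of the request") comes from the find_first_bit()/ALIGN() pair: the lowest set bit of nr_pfns is the largest power of two dividing the request, and the starting pfn is padded up to that boundary. Below is a minimal userspace sketch of that arithmetic; __builtin_ctzl (GCC/Clang) and a local ALIGN macro stand in for the kernel helpers, and the pfn and request size are made-up example values.

/*
 * Userspace sketch of the alignment math in altmap_alloc_block_buf().
 * Not kernel code: ALIGN mirrors the kernel macro, __builtin_ctzl
 * stands in for find_first_bit() on a single word.
 */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long pfn = 0x1003;	/* hypothetical next free pfn */
	unsigned long nr_pfns = 8;	/* request: 8 pages */

	/* Largest power of two dividing nr_pfns: 1 << (lowest set bit). */
	unsigned long nr_align = 1UL << __builtin_ctzl(nr_pfns);

	/* Pages skipped so the allocation starts size-aligned. */
	nr_align = ALIGN(pfn, nr_align) - pfn;

	printf("skip %lu pages, allocate at pfn %#lx\n",
	       nr_align, pfn + nr_align);
	return 0;
}

For a power-of-two request such as 8 pages this reproduces size-aligned placement: pfn 0x1003 is padded by 5 pages, so the allocation starts at pfn 0x1008, and the padding is what the function accounts for in altmap->align.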