Diffstat (limited to 'mm/sparse-vmemmap.c')
-rw-r--r--  mm/sparse-vmemmap.c  77
1 file changed, 75 insertions(+), 2 deletions(-)
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d9714bdcb4a3..aa33fd67fa41 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -22,6 +22,7 @@
 #include <linux/bootmem.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
@@ -40,9 +41,11 @@ static void * __init_refok __earlyonly_bootmem_alloc(int node,
 				unsigned long align,
 				unsigned long goal)
 {
-	return __alloc_bootmem_node(NODE_DATA(node), size, align, goal);
+	return __alloc_bootmem_node_high(NODE_DATA(node), size, align, goal);
 }
 
+static void *vmemmap_buf;
+static void *vmemmap_buf_end;
 
 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 {
@@ -64,6 +67,24 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
 			__pa(MAX_DMA_ADDRESS));
 }
 
+/* need to make sure size is all the same during early stage */
+void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
+{
+	void *ptr;
+
+	if (!vmemmap_buf)
+		return vmemmap_alloc_block(size, node);
+
+	/* take the allocation from the per-node buffer */
+	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
+	if (ptr + size > vmemmap_buf_end)
+		return vmemmap_alloc_block(size, node);
+
+	vmemmap_buf = ptr + size;
+
+	return ptr;
+}
+
 void __meminit vmemmap_verify(pte_t *pte, int node,
 				unsigned long start, unsigned long end)
 {
@@ -80,7 +101,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
 	pte_t *pte = pte_offset_kernel(pmd, addr);
 	if (pte_none(*pte)) {
 		pte_t entry;
-		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
+		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
 		if (!p)
 			return NULL;
 		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
@@ -163,3 +184,55 @@ struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
 
 	return map;
 }
+
+void __init sparse_mem_maps_populate_node(struct page **map_map,
+					  unsigned long pnum_begin,
+					  unsigned long pnum_end,
+					  unsigned long map_count, int nodeid)
+{
+	unsigned long pnum;
+	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
+	void *vmemmap_buf_start;
+
+	size = ALIGN(size, PMD_SIZE);
+	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
+						      PMD_SIZE, __pa(MAX_DMA_ADDRESS));
+
+	if (vmemmap_buf_start) {
+		vmemmap_buf = vmemmap_buf_start;
+		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
+	}
+
+	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
+		struct mem_section *ms;
+
+		if (!present_section_nr(pnum))
+			continue;
+
+		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+		if (map_map[pnum])
+			continue;
+		ms = __nr_to_section(pnum);
+		printk(KERN_ERR "%s: sparsemem memory map backing failed, "
+			"some memory will not be available.\n", __func__);
+		ms->section_mem_map = 0;
+	}
+
+	if (vmemmap_buf_start) {
+		/* free whatever is left of the buffer */
+#ifdef CONFIG_NO_BOOTMEM
+		free_early(__pa(vmemmap_buf_start), __pa(vmemmap_buf_end));
+		if (vmemmap_buf_start < vmemmap_buf) {
+			char name[15];
+
+			snprintf(name, sizeof(name), "MEMMAP %d", nodeid);
+			reserve_early_without_check(__pa(vmemmap_buf_start),
+						    __pa(vmemmap_buf), name);
+		}
+#else
+		free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf);
+#endif
+		vmemmap_buf = NULL;
+		vmemmap_buf_end = NULL;
+	}
+}
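
sparse_mem_maps_populate_node() sizes the per-node buffer as one section's worth of struct page, rounded up to PMD_SIZE, times the number of sections to cover; sparse_mem_map_populate() (and, underneath it, vmemmap_alloc_block_buf()) then draws from that buffer, and whatever is left is handed back to the early allocator at the end. A rough back-of-the-envelope check of the sizing, using illustrative x86_64-style constants that are assumptions here (64-byte struct page, 32768 pages per 128 MiB section, 512 present sections), not values taken from this patch:

#include <stdio.h>

/* Illustrative values only, not derived from this patch. */
#define STRUCT_PAGE_SIZE	64UL
#define PAGES_PER_SECTION	32768UL			/* 128 MiB sections of 4 KiB pages */
#define PMD_SIZE		(2UL << 20)		/* 2 MiB */

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long per_section = STRUCT_PAGE_SIZE * PAGES_PER_SECTION;
	unsigned long aligned = ALIGN_UP(per_section, PMD_SIZE);
	unsigned long map_count = 512;	/* e.g. 512 present sections = a 64 GiB node */

	/* 64 * 32768 = 2 MiB, already PMD aligned, so no padding is added
	 * and every section's memmap starts on its own 2 MiB boundary. */
	printf("per-section memmap: %lu bytes, aligned: %lu bytes\n",
	       per_section, aligned);
	printf("buffer for the node: %lu MiB\n", aligned * map_count >> 20);
	return 0;
}

On such a configuration the per-section memmap is exactly 2 MiB, so the ALIGN() to PMD_SIZE adds no padding and each section's memmap can be backed by a PMD-sized mapping carved out of one node-local buffer, rather than by scattered per-section allocations.
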