aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm/memblock.c
diff options
context:
space:
mode:
authorYinghai Lu <yinghai@kernel.org>2010-08-25 16:39:16 -0400
committerH. Peter Anvin <hpa@zytor.com>2010-08-27 14:10:52 -0400
commit88ba088c18457caaf8d2e5f8d36becc731a3d4f6 (patch)
tree1b82b841333bf664b6f49a9d8bb15e44d4670ce4 /arch/x86/mm/memblock.c
parent4d5cf86ce187c0d3a4cdf233ab0cc6526ccbe01f (diff)
x86, memblock: Add memblock_x86_register_active_regions() and memblock_x86_hole_size()
memblock_x86_register_active_regions() will be used to fill early_node_map; the result will be the intersection of memblock.memory.region and the NUMA data. memblock_x86_hole_size() will be used to find the hole size in memblock.memory.region within a specified range. Signed-off-by: Yinghai Lu <yinghai@kernel.org> Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/mm/memblock.c')
-rw-r--r--arch/x86/mm/memblock.c66
1 file changed, 66 insertions, 0 deletions
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index b4500604ab30..53a7a5aebd6b 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -232,3 +232,69 @@ void __init memblock_x86_free_range(u64 start, u64 end)
232 232
233 memblock_free(start, end - start); 233 memblock_free(start, end - start);
234} 234}
235
236/*
237 * Finds an active region in the address range from start_pfn to last_pfn and
238 * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
239 */
240static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
241 unsigned long start_pfn,
242 unsigned long last_pfn,
243 unsigned long *ei_startpfn,
244 unsigned long *ei_endpfn)
245{
246 u64 align = PAGE_SIZE;
247
248 *ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
249 *ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;
250
251 /* Skip map entries smaller than a page */
252 if (*ei_startpfn >= *ei_endpfn)
253 return 0;
254
255 /* Skip if map is outside the node */
256 if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
257 return 0;
258
259 /* Check for overlaps */
260 if (*ei_startpfn < start_pfn)
261 *ei_startpfn = start_pfn;
262 if (*ei_endpfn > last_pfn)
263 *ei_endpfn = last_pfn;
264
265 return 1;
266}
267
268/* Walk the memblock.memory map and register active regions within a node */
269void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
270 unsigned long last_pfn)
271{
272 unsigned long ei_startpfn;
273 unsigned long ei_endpfn;
274 struct memblock_region *r;
275
276 for_each_memblock(memory, r)
277 if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
278 &ei_startpfn, &ei_endpfn))
279 add_active_range(nid, ei_startpfn, ei_endpfn);
280}
281
282/*
283 * Find the hole size (in bytes) in the memory range.
284 * @start: starting address of the memory range to scan
285 * @end: ending address of the memory range to scan
286 */
287u64 __init memblock_x86_hole_size(u64 start, u64 end)
288{
289 unsigned long start_pfn = start >> PAGE_SHIFT;
290 unsigned long last_pfn = end >> PAGE_SHIFT;
291 unsigned long ei_startpfn, ei_endpfn, ram = 0;
292 struct memblock_region *r;
293
294 for_each_memblock(memory, r)
295 if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
296 &ei_startpfn, &ei_endpfn))
297 ram += ei_endpfn - ei_startpfn;
298
299 return end - start - ((u64)ram << PAGE_SHIFT);
300}