author     Tejun Heo <tj@kernel.org>        2011-04-04 18:23:52 -0400
committer  H. Peter Anvin <hpa@zytor.com>   2011-04-06 20:57:11 -0400
commit     af7c1a6e8374e05aab4a98ce4d2fb07b66506a02
tree       2d3fd80705c8af2c8fac2fa6de5439d5f07795d1
parent     c4d4f577d49c441ab4f1bb6068247dafb366e635
x86-32, numa: Make @size in init_alloc_remap() represent bytes
The @size variable in init_alloc_remap() is confusing in that it starts
out as a number of bytes, as its name implies, and then becomes a number
of pages. Make it consistently represent bytes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1301955840-7246-7-git-send-email-tj@kernel.org
Acked-by: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
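[Illustration, not part of the commit] A minimal userspace sketch of the
arithmetic the patch relies on, assuming the 32-bit non-PAE constants
PAGE_SHIFT == 12 and PTRS_PER_PTE == 1024 (so LARGE_PAGE_BYTES is 4 MiB)
and a simplified power-of-two ALIGN(): rounding @size up to a large page
in bytes and shifting by PAGE_SHIFT yields the same page count as the old
two-step bytes -> large pages -> PAGE_SIZE pages conversion.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PTRS_PER_PTE	1024	/* assumed: one page table maps 4 MiB */
#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* power-of-two only */

int main(void)
{
	unsigned long size;

	for (size = 1; size < 4 * LARGE_PAGE_BYTES; size += 4321) {
		/* old: to large pages (rounding up), then to PAGE_SIZE pages */
		unsigned long old_pages =
			(size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES * PTRS_PER_PTE;
		/* new: stay in bytes, convert to pages only at the boundary */
		unsigned long new_pages = ALIGN(size, LARGE_PAGE_BYTES) >> PAGE_SHIFT;

		assert(old_pages == new_pages);
	}
	printf("byte-based and page-based roundups agree\n");
	return 0;
}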
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/numa_32.c | 18 +++++++-----------
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 30933fec8f75..99310d26fe34 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -286,22 +286,19 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
 	size = node_remap_size[nid];
 	size += ALIGN(sizeof(pg_data_t), PAGE_SIZE);
 
-	/* convert size to large (pmd size) pages, rounding up */
-	size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
-	/* now the roundup is correct, convert to PAGE_SIZE pages */
-	size = size * PTRS_PER_PTE;
+	/* align to large page */
+	size = ALIGN(size, LARGE_PAGE_BYTES);
 
 	node_pa = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
 					 (u64)node_end_pfn[nid] << PAGE_SHIFT,
-					 (u64)size << PAGE_SHIFT,
-					 LARGE_PAGE_BYTES);
+					 size, LARGE_PAGE_BYTES);
 	if (node_pa == MEMBLOCK_ERROR)
 		panic("Can not get kva ram\n");
 
-	node_remap_size[nid] = size;
+	node_remap_size[nid] = size >> PAGE_SHIFT;
 	node_remap_offset[nid] = offset;
 	printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of node %d at %llx\n",
-	       size, nid, node_pa >> PAGE_SHIFT);
+	       size >> PAGE_SHIFT, nid, node_pa >> PAGE_SHIFT);
 
 	/*
 	 * prevent kva address below max_low_pfn want it on system
@@ -315,12 +312,11 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
 	 * So memblock_x86_reserve_range here, hope we don't run out
 	 * of that array
 	 */
-	memblock_x86_reserve_range(node_pa, node_pa + ((u64)size << PAGE_SHIFT),
-				   "KVA RAM");
+	memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
 
 	node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
 
-	return size;
+	return size >> PAGE_SHIFT;
 }
 
 static void init_remap_allocator(int nid)
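[Context note, not part of the commit] After this change @size stays in
bytes for the whole body of init_alloc_remap(); page units appear only at
the edges, where node_remap_size[nid] and the return value are page counts
and the node PFN bounds become byte addresses for memblock. A hypothetical
standalone sketch of those boundary conversions, assuming PAGE_SHIFT == 12:

#include <stdio.h>

typedef unsigned long long u64;

#define PAGE_SHIFT 12	/* assumed x86-32 value */

/* node_remap_size[nid] and init_alloc_remap()'s return value are pages */
static unsigned long bytes_to_pages(u64 bytes)
{
	return (unsigned long)(bytes >> PAGE_SHIFT);
}

/* node_{start,end}_pfn[] become byte addresses for memblock_find_in_range() */
static u64 pfn_to_bytes(unsigned long pfn)
{
	return (u64)pfn << PAGE_SHIFT;
}

int main(void)
{
	printf("8 MiB -> %lu pages\n", bytes_to_pages(8ULL << 20));
	printf("pfn 0x1000 -> byte address %#llx\n", pfn_to_bytes(0x1000));
	return 0;
}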