author		Tejun Heo <tj@kernel.org>	2011-04-04 18:23:51 -0400
committer	H. Peter Anvin <hpa@zytor.com>	2011-04-06 20:57:04 -0400
commit		c4d4f577d49c441ab4f1bb6068247dafb366e635 (patch)
tree		b56bbba0794635481530a7299e313d851cb69572 /arch/x86/mm/numa_32.c
parent		5510db9c1be111528ce46c57f0bec1c9dce258f4 (diff)
x86-32, numa: Rename @node_kva to @node_pa in init_alloc_remap()
init_alloc_remap() is about to do more, and using the _kva suffix for a
physical address becomes confusing because the function will be handling
both physical and virtual addresses.  Rename @node_kva to @node_pa.

This is a trivial rename and doesn't cause any behavior difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1301955840-7246-6-git-send-email-tj@kernel.org
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
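The distinction the rename protects: memblock_find_in_range() hands back a
physical address, while the "KVA" this function is building toward is the
kernel virtual address the range will later be remapped at. Below is a
minimal userspace sketch of that relationship, with made-up addresses and a
hypothetical remap-window base; nothing in it is the kernel's actual API.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define PAGE_SHIFT 12

int main(void)
{
	/* What memblock_find_in_range() returns: a physical address.
	 * Good for pfn math and reservation bookkeeping, never for
	 * dereferencing.  0x40000000 is a made-up example value. */
	u64 node_pa = 0x40000000ULL;
	u64 start_pfn = node_pa >> PAGE_SHIFT;

	/* What code eventually dereferences: a kernel virtual address
	 * inside a remap window.  The base and the per-node offset
	 * here are hypothetical stand-ins. */
	char *remap_window = (char *)0x10000000UL;
	unsigned long offset_pages = 16;
	void *node_kva = remap_window + (offset_pages << PAGE_SHIFT);

	printf("phys %#llx (pfn %#llx) vs virt %p\n",
	       (unsigned long long)node_pa,
	       (unsigned long long)start_pfn, node_kva);
	return 0;
}

Calling the physical address @node_pa keeps those two roles visually
separate once the function starts juggling both.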
Diffstat (limited to 'arch/x86/mm/numa_32.c')
-rw-r--r--	arch/x86/mm/numa_32.c	19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 5039e9b21d9e..30933fec8f75 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -267,7 +267,7 @@ void resume_map_numa_kva(pgd_t *pgd_base)
 static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
 {
 	unsigned long size;
-	u64 node_kva;
+	u64 node_pa;
 
 	/*
 	 * The acpi/srat node info can show hot-add memroy zones where
@@ -291,17 +291,17 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
 	/* now the roundup is correct, convert to PAGE_SIZE pages */
 	size = size * PTRS_PER_PTE;
 
-	node_kva = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
-					  (u64)node_end_pfn[nid] << PAGE_SHIFT,
-					  (u64)size << PAGE_SHIFT,
-					  LARGE_PAGE_BYTES);
-	if (node_kva == MEMBLOCK_ERROR)
+	node_pa = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
+					 (u64)node_end_pfn[nid] << PAGE_SHIFT,
+					 (u64)size << PAGE_SHIFT,
+					 LARGE_PAGE_BYTES);
+	if (node_pa == MEMBLOCK_ERROR)
 		panic("Can not get kva ram\n");
 
 	node_remap_size[nid] = size;
 	node_remap_offset[nid] = offset;
 	printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of node %d at %llx\n",
-			size, nid, node_kva >> PAGE_SHIFT);
+			size, nid, node_pa >> PAGE_SHIFT);
 
 	/*
 	 * prevent kva address below max_low_pfn want it on system
@@ -315,11 +315,10 @@ static __init unsigned long init_alloc_remap(int nid, unsigned long offset)
 	 * So memblock_x86_reserve_range here, hope we don't run out
 	 * of that array
 	 */
-	memblock_x86_reserve_range(node_kva,
-			node_kva + ((u64)size << PAGE_SHIFT),
-			"KVA RAM");
+	memblock_x86_reserve_range(node_pa, node_pa + ((u64)size << PAGE_SHIFT),
+			"KVA RAM");
 
-	node_remap_start_pfn[nid] = node_kva >> PAGE_SHIFT;
+	node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT;
 
 	return size;
 }
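For orientation, the @offset parameter and the returned size suggest the
caller lays node remap areas out back-to-back, threading a running page
offset through successive calls. A hedged mock of that pattern follows; the
caller shape and the sizes are invented for illustration, not taken from
the kernel source.

#include <stdio.h>

/* Mock of init_alloc_remap(): records where each node's remap area
 * would land and returns its size in pages.  Sizes are made up. */
static unsigned long mock_size[] = { 512, 1024, 256 };

static unsigned long init_alloc_remap_mock(int nid, unsigned long offset)
{
	printf("node %d: %lu pages at page offset %lu\n",
	       nid, mock_size[nid], offset);
	return mock_size[nid];
}

int main(void)
{
	unsigned long reserve_pages = 0;

	/* Each node's allocation starts where the previous one ended. */
	for (int nid = 0; nid < 3; nid++)
		reserve_pages += init_alloc_remap_mock(nid, reserve_pages);

	printf("total: %lu pages of remap space\n", reserve_pages);
	return 0;
}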