about summary refs log tree commit diff stats
path: root/arch/x86/mm/numa.c
diff options
context:
space:
mode:
author Tejun Heo <tj@kernel.org> 2011-05-02 08:18:53 -0400
committer Tejun Heo <tj@kernel.org> 2011-05-02 08:18:53 -0400
commit 38f3e1ca24cc3ec416855e02676f91c898a8a262 (patch)
tree01d8f9c88223e7af1b08a465dfa94aaf752d57c7 /arch/x86/mm/numa.c
parent744baba0c4072b04664952a89292e4708eaf949a (diff)
x86, NUMA: Remove long 64bit assumption from numa.c
Code moved from numa_64.c has assumption that long is 64bit in several places. This patch removes the assumption by using {s|u}64_t explicitly, using PFN_PHYS() for page number -> addr conversions and adjusting printf formats. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Yinghai Lu <yinghai@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. Peter Anvin" <hpa@zytor.com>
Diffstat (limited to 'arch/x86/mm/numa.c')
-rw-r--r-- arch/x86/mm/numa.c | 45
1 file changed, 22 insertions, 23 deletions
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index c400f3b2b93e..b45caa39f7cf 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -192,13 +192,12 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
192#endif 192#endif
193 193
194/* Initialize bootmem allocator for a node */ 194/* Initialize bootmem allocator for a node */
195static void __init 195static void __init setup_node_bootmem(int nid, u64 start, u64 end)
196setup_node_bootmem(int nid, unsigned long start, unsigned long end)
197{ 196{
198 const u64 nd_low = (u64)MAX_DMA_PFN << PAGE_SHIFT; 197 const u64 nd_low = PFN_PHYS(MAX_DMA_PFN);
199 const u64 nd_high = (u64)max_pfn_mapped << PAGE_SHIFT; 198 const u64 nd_high = PFN_PHYS(max_pfn_mapped);
200 const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE); 199 const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
201 unsigned long nd_pa; 200 u64 nd_pa;
202 int tnid; 201 int tnid;
203 202
204 /* 203 /*
@@ -210,7 +209,7 @@ setup_node_bootmem(int nid, unsigned long start, unsigned long end)
210 209
211 start = roundup(start, ZONE_ALIGN); 210 start = roundup(start, ZONE_ALIGN);
212 211
213 printk(KERN_INFO "Initmem setup node %d %016lx-%016lx\n", 212 printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
214 nid, start, end); 213 nid, start, end);
215 214
216 /* 215 /*
@@ -223,13 +222,13 @@ setup_node_bootmem(int nid, unsigned long start, unsigned long end)
223 nd_pa = memblock_find_in_range(nd_low, nd_high, 222 nd_pa = memblock_find_in_range(nd_low, nd_high,
224 nd_size, SMP_CACHE_BYTES); 223 nd_size, SMP_CACHE_BYTES);
225 if (nd_pa == MEMBLOCK_ERROR) { 224 if (nd_pa == MEMBLOCK_ERROR) {
226 pr_err("Cannot find %lu bytes in node %d\n", nd_size, nid); 225 pr_err("Cannot find %zu bytes in node %d\n", nd_size, nid);
227 return; 226 return;
228 } 227 }
229 memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA"); 228 memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
230 229
231 /* report and initialize */ 230 /* report and initialize */
232 printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", 231 printk(KERN_INFO " NODE_DATA [%016Lx - %016Lx]\n",
233 nd_pa, nd_pa + nd_size - 1); 232 nd_pa, nd_pa + nd_size - 1);
234 tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); 233 tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
235 if (tnid != nid) 234 if (tnid != nid)
@@ -257,7 +256,7 @@ setup_node_bootmem(int nid, unsigned long start, unsigned long end)
257int __init numa_cleanup_meminfo(struct numa_meminfo *mi) 256int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
258{ 257{
259 const u64 low = 0; 258 const u64 low = 0;
260 const u64 high = (u64)max_pfn << PAGE_SHIFT; 259 const u64 high = PFN_PHYS(max_pfn);
261 int i, j, k; 260 int i, j, k;
262 261
263 for (i = 0; i < mi->nr_blks; i++) { 262 for (i = 0; i < mi->nr_blks; i++) {
@@ -275,7 +274,7 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
275 274
276 for (j = i + 1; j < mi->nr_blks; j++) { 275 for (j = i + 1; j < mi->nr_blks; j++) {
277 struct numa_memblk *bj = &mi->blk[j]; 276 struct numa_memblk *bj = &mi->blk[j];
278 unsigned long start, end; 277 u64 start, end;
279 278
280 /* 279 /*
281 * See whether there are overlapping blocks. Whine 280 * See whether there are overlapping blocks. Whine
@@ -313,7 +312,7 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
313 } 312 }
314 if (k < mi->nr_blks) 313 if (k < mi->nr_blks)
315 continue; 314 continue;
316 printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%lx,%lx)\n", 315 printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n",
317 bi->nid, bi->start, bi->end, bj->start, bj->end, 316 bi->nid, bi->start, bi->end, bj->start, bj->end,
318 start, end); 317 start, end);
319 bi->start = start; 318 bi->start = start;
@@ -378,7 +377,7 @@ static int __init numa_alloc_distance(void)
378 cnt++; 377 cnt++;
379 size = cnt * cnt * sizeof(numa_distance[0]); 378 size = cnt * cnt * sizeof(numa_distance[0]);
380 379
381 phys = memblock_find_in_range(0, (u64)max_pfn_mapped << PAGE_SHIFT, 380 phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
382 size, PAGE_SIZE); 381 size, PAGE_SIZE);
383 if (phys == MEMBLOCK_ERROR) { 382 if (phys == MEMBLOCK_ERROR) {
384 pr_warning("NUMA: Warning: can't allocate distance table!\n"); 383 pr_warning("NUMA: Warning: can't allocate distance table!\n");
@@ -456,24 +455,24 @@ EXPORT_SYMBOL(__node_distance);
456 */ 455 */
457static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi) 456static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
458{ 457{
459 unsigned long numaram, e820ram; 458 u64 numaram, e820ram;
460 int i; 459 int i;
461 460
462 numaram = 0; 461 numaram = 0;
463 for (i = 0; i < mi->nr_blks; i++) { 462 for (i = 0; i < mi->nr_blks; i++) {
464 unsigned long s = mi->blk[i].start >> PAGE_SHIFT; 463 u64 s = mi->blk[i].start >> PAGE_SHIFT;
465 unsigned long e = mi->blk[i].end >> PAGE_SHIFT; 464 u64 e = mi->blk[i].end >> PAGE_SHIFT;
466 numaram += e - s; 465 numaram += e - s;
467 numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e); 466 numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
468 if ((long)numaram < 0) 467 if ((s64)numaram < 0)
469 numaram = 0; 468 numaram = 0;
470 } 469 }
471 470
472 e820ram = max_pfn - (memblock_x86_hole_size(0, 471 e820ram = max_pfn - (memblock_x86_hole_size(0,
473 max_pfn << PAGE_SHIFT) >> PAGE_SHIFT); 472 PFN_PHYS(max_pfn)) >> PAGE_SHIFT);
474 /* We seem to lose 3 pages somewhere. Allow 1M of slack. */ 473 /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
475 if ((long)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) { 474 if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
476 printk(KERN_ERR "NUMA: nodes only cover %luMB of your %luMB e820 RAM. Not used.\n", 475 printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
477 (numaram << PAGE_SHIFT) >> 20, 476 (numaram << PAGE_SHIFT) >> 20,
478 (e820ram << PAGE_SHIFT) >> 20); 477 (e820ram << PAGE_SHIFT) >> 20);
479 return false; 478 return false;
@@ -503,7 +502,7 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
503 502
504 /* Finally register nodes. */ 503 /* Finally register nodes. */
505 for_each_node_mask(nid, node_possible_map) { 504 for_each_node_mask(nid, node_possible_map) {
506 u64 start = (u64)max_pfn << PAGE_SHIFT; 505 u64 start = PFN_PHYS(max_pfn);
507 u64 end = 0; 506 u64 end = 0;
508 507
509 for (i = 0; i < mi->nr_blks; i++) { 508 for (i = 0; i < mi->nr_blks; i++) {
@@ -595,11 +594,11 @@ static int __init dummy_numa_init(void)
595{ 594{
596 printk(KERN_INFO "%s\n", 595 printk(KERN_INFO "%s\n",
597 numa_off ? "NUMA turned off" : "No NUMA configuration found"); 596 numa_off ? "NUMA turned off" : "No NUMA configuration found");
598 printk(KERN_INFO "Faking a node at %016lx-%016lx\n", 597 printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n",
599 0LU, max_pfn << PAGE_SHIFT); 598 0LLU, PFN_PHYS(max_pfn));
600 599
601 node_set(0, numa_nodes_parsed); 600 node_set(0, numa_nodes_parsed);
602 numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT); 601 numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
603 602
604 return 0; 603 return 0;
605} 604}