author     Bob Picco <bob.picco@hp.com>       2005-10-29 17:23:05 -0400
committer  Tony Luck <tony.luck@intel.com>    2005-11-08 12:56:15 -0500
commit     97835245768a638002722a36ba9a3b76d0910f68 (patch)
tree       14953474ff8aa28f61150518b9af2ef55968f6fc
parent     f093182d313edde9b1f86dbdaf40ba4da2dbd0e7 (diff)
[IA64] fix memoryless node allocation
The original memoryless node allocation attempted to use NODEDATA_ALIGN for alignment, but the bootmem allocator only allows power-of-two alignments, which causes a BUG_ON for some nodes. For cpu-only nodes, just allocate with a PERCPU_PAGE_SIZE alignment.

Some older firmware reports SLIT distances of 0xff, which resulted in bestnode never being computed; this case is now handled correctly. The failed-allocation check was removed because it is redundant: the bootmem allocator already performs it.

This fix has been boot tested on a 4 node machine which has 4 cpu-only nodes and 1 memory node. Thanks to Pete Keilty for reporting this and helping me test it.

Signed-off-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
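For illustration only (not part of the patch), a minimal userspace sketch of the two problems described above; the PERCPU_PAGE_SIZE value, NR_NODES, and the all-0xff SLIT row are assumed values for the demo, not taken from the patch. It shows that the old per-node alignment of node * PERCPU_PAGE_SIZE is not a power of two for node 3 (which would trip the bootmem allocator's BUG_ON), and that when every distance is 0xff no bestnode is picked unless an anynode fallback like the one added here is applied.

/*
 * Standalone sketch (not kernel code) of the two issues fixed above:
 * 1) bootmem alignment must be a power of two; node * PERCPU_PAGE_SIZE
 *    fails that test for node 3, while PERCPU_PAGE_SIZE itself never does;
 * 2) if every SLIT distance is 0xff, no "bestnode" is ever picked, so
 *    fall back to any online node.
 * PERCPU_PAGE_SIZE, NR_NODES and the distance row are demo assumptions.
 */
#include <stdio.h>

#define PERCPU_PAGE_SIZE	(64 * 1024)	/* 64K, typical for ia64 */
#define NR_NODES		5

static int is_power_of_two(unsigned long x)
{
	return x != 0 && (x & (x - 1)) == 0;
}

int main(void)
{
	/* hypothetical SLIT row: older firmware reporting 0xff everywhere */
	unsigned char distance[NR_NODES] = { 0xff, 0xff, 0xff, 0xff, 0xff };
	unsigned char best = 0xff;
	int bestnode = -1, anynode = 0, node;

	for (node = 0; node < NR_NODES; node++) {
		unsigned long old_align = node ? node * PERCPU_PAGE_SIZE
					       : 1024 * 1024;

		printf("node %d: old align %#lx power-of-two? %s\n",
		       node, old_align,
		       is_power_of_two(old_align) ? "yes" : "no (BUG_ON)");

		if (distance[node] < best) {	/* never true when all 0xff */
			best = distance[node];
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == -1)		/* the fallback the patch adds */
		bestnode = anynode;

	printf("allocating on node %d with align %#x\n",
	       bestnode, PERCPU_PAGE_SIZE);
	return 0;
}

Built with a plain C compiler, this prints "no (BUG_ON)" only for node 3 and falls back to the last online node for the allocation.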
-rw-r--r--    arch/ia64/mm/discontig.c    19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index a88cdb7232f8..0f776b032d31 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -350,14 +350,12 @@ static void __init initialize_pernode_data(void)
  * for best.
  * @nid: node id
  * @pernodesize: size of this node's pernode data
- * @align: alignment to use for this node's pernode data
  */
-static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize,
-	unsigned long align)
+static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
 {
 	void *ptr = NULL;
 	u8 best = 0xff;
-	int bestnode = -1, node;
+	int bestnode = -1, node, anynode = 0;
 
 	for_each_online_node(node) {
 		if (node_isset(node, memory_less_mask))
@@ -366,13 +364,15 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize,
 			best = node_distance(nid, node);
 			bestnode = node;
 		}
+		anynode = node;
 	}
 
-	ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat,
-		pernodesize, align, __pa(MAX_DMA_ADDRESS));
+	if (bestnode == -1)
+		bestnode = anynode;
+
+	ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize,
+		PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 
-	if (!ptr)
-		panic("NO memory for memory less node\n");
 	return ptr;
 }
 
@@ -413,8 +413,7 @@ static void __init memory_less_nodes(void)
 
 	for_each_node_mask(node, memory_less_mask) {
 		pernodesize = compute_pernodesize(node);
-		pernode = memory_less_node_alloc(node, pernodesize,
-			(node) ? (node * PERCPU_PAGE_SIZE) : (1024*1024));
+		pernode = memory_less_node_alloc(node, pernodesize);
 		fill_pernode(node, __pa(pernode), pernodesize);
 	}
 