author	Ingo Molnar <mingo@elte.hu>	2008-07-08 05:14:58 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-08 05:14:58 -0400
commit	3de352bbd86f890dd0c5e1c09a6a1b0b29e0f8ce (patch)
tree	d4c5eba8cd2abefd7c9f16d089393f0f5999cf63 /mm
parent	1b8ba39a3fad9c58532f6dad12c94d6e675be656 (diff)
parent	9340e1ccdf7b9b22a2be7f51cd74e8b5e11961bf (diff)
Merge branch 'x86/mpparse' into x86/devel
Conflicts:
	arch/x86/Kconfig
	arch/x86/kernel/io_apic_32.c
	arch/x86/kernel/setup_64.c
	arch/x86/mm/init_32.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	80
1 file changed, 67 insertions(+), 13 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f32fae3121f0..41c6e3aa059f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2929,6 +2929,14 @@ void __init free_bootmem_with_active_regions(int nid,
 	}
 }
 
+void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
+{
+	int i;
+
+	for_each_active_range_index_in_nid(i, nid)
+		work_fn(early_node_map[i].start_pfn, early_node_map[i].end_pfn,
+				data);
+}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
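The new work_with_active_regions() above walks every registered early_node_map range on a node and hands each PFN span to a caller-supplied callback. A minimal sketch of such a callback, assuming work_fn_t is the int (*)(unsigned long, unsigned long, void *) callback type this hunk calls through; count_node_pages_work_fn and count_node_pages are hypothetical names, not part of the patch:

/* Hypothetical work_fn_t callback: accumulate the number of PFNs
 * covered by each active range into the counter passed via *data. */
static int __init count_node_pages_work_fn(unsigned long start_pfn,
					   unsigned long end_pfn, void *data)
{
	unsigned long *total = data;

	*total += end_pfn - start_pfn;
	return 0;
}

/* Hypothetical caller: sum the PFNs in all active ranges on a node. */
static unsigned long __init count_node_pages(int nid)
{
	unsigned long total = 0;

	work_with_active_regions(nid, count_node_pages_work_fn, &total);
	return total;
}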
@@ -3461,6 +3469,11 @@ void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
 	alloc_node_mem_map(pgdat);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
+		nid, (unsigned long)pgdat,
+		(unsigned long)pgdat->node_mem_map);
+#endif
 
 	free_area_init_core(pgdat, zones_size, zholes_size);
 }
@@ -3547,27 +3560,68 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 }
 
 /**
- * shrink_active_range - Shrink an existing registered range of PFNs
+ * remove_active_range - Shrink an existing registered range of PFNs
  * @nid: The node id the range is on that should be shrunk
- * @old_end_pfn: The old end PFN of the range
- * @new_end_pfn: The new PFN of the range
+ * @start_pfn: The new PFN of the range
+ * @end_pfn: The new PFN of the range
  *
  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept at the end physical page range that has already been
- * registered with add_active_range(). This function allows an arch to shrink
- * an existing registered range.
+ * The map is kept near the end physical page range that has already been
+ * registered. This function allows an arch to shrink an existing registered
+ * range.
  */
-void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-						unsigned long new_end_pfn)
+void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
+						unsigned long end_pfn)
 {
-	int i;
+	int i, j;
+	int removed = 0;
+
+	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
+			  nid, start_pfn, end_pfn);
 
 	/* Find the old active region end and shrink */
-	for_each_active_range_index_in_nid(i, nid)
-		if (early_node_map[i].end_pfn == old_end_pfn) {
-			early_node_map[i].end_pfn = new_end_pfn;
-			break;
+	for_each_active_range_index_in_nid(i, nid) {
+		if (early_node_map[i].start_pfn >= start_pfn &&
+		    early_node_map[i].end_pfn <= end_pfn) {
+			/* clear it */
+			early_node_map[i].start_pfn = 0;
+			early_node_map[i].end_pfn = 0;
+			removed = 1;
+			continue;
 		}
+		if (early_node_map[i].start_pfn < start_pfn &&
+		    early_node_map[i].end_pfn > start_pfn) {
+			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
+			early_node_map[i].end_pfn = start_pfn;
+			if (temp_end_pfn > end_pfn)
+				add_active_range(nid, end_pfn, temp_end_pfn);
+			continue;
+		}
+		if (early_node_map[i].start_pfn >= start_pfn &&
+		    early_node_map[i].end_pfn > end_pfn &&
+		    early_node_map[i].start_pfn < end_pfn) {
+			early_node_map[i].start_pfn = end_pfn;
+			continue;
+		}
+	}
+
+	if (!removed)
+		return;
+
+	/* remove the blank ones */
+	for (i = nr_nodemap_entries - 1; i > 0; i--) {
+		if (early_node_map[i].nid != nid)
+			continue;
+		if (early_node_map[i].end_pfn)
+			continue;
+		/* we found it, get rid of it */
+		for (j = i; j < nr_nodemap_entries - 1; j++)
+			memcpy(&early_node_map[j], &early_node_map[j+1],
+				sizeof(early_node_map[j]));
+		j = nr_nodemap_entries - 1;
+		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
+		nr_nodemap_entries--;
+	}
 }
 
 /**
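Note the shape of the rewritten removal logic: an early_node_map entry can be fully covered by the removed span [start_pfn, end_pfn), straddle its front, or straddle its back, and a front-straddling entry whose tail extends past end_pfn is split in two via add_active_range(). A stand-alone user-space sketch that replays the same case analysis; struct range and remove_span() are simplified stand-ins for illustration, not kernel code:

#include <stdio.h>

struct range { unsigned long start_pfn, end_pfn; };

/* Replays remove_active_range()'s per-entry cases against [start, end). */
static void remove_span(struct range *r, unsigned long start, unsigned long end)
{
	if (r->start_pfn >= start && r->end_pfn <= end) {
		/* entry fully covered: clear it */
		r->start_pfn = r->end_pfn = 0;
	} else if (r->start_pfn < start && r->end_pfn > start) {
		/* entry straddles the front: truncate it; a tail past
		 * `end` would be re-registered as a new range */
		unsigned long old_end = r->end_pfn;

		r->end_pfn = start;
		if (old_end > end)
			printf("re-add tail [%lu, %lu)\n", end, old_end);
	} else if (r->start_pfn >= start && r->start_pfn < end &&
		   r->end_pfn > end) {
		/* entry straddles the back: move its start up */
		r->start_pfn = end;
	}
}

int main(void)
{
	struct range r = { 0, 100 };

	remove_span(&r, 40, 60);	/* splits [0,100) around [40,60) */
	printf("kept head [%lu, %lu)\n", r.start_pfn, r.end_pfn);
	return 0;
}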