aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 49 ++++++++++++++++++++++++++++++++++++---------
1 file changed, 39 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f32fae3121f0..eee5ba7509c1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3461,6 +3461,11 @@ void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
 	alloc_node_mem_map(pgdat);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
+		nid, (unsigned long)pgdat,
+		(unsigned long)pgdat->node_mem_map);
+#endif
 
 	free_area_init_core(pgdat, zones_size, zholes_size);
 }
@@ -3549,25 +3554,49 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 /**
  * shrink_active_range - Shrink an existing registered range of PFNs
  * @nid: The node id the range is on that should be shrunk
- * @old_end_pfn: The old end PFN of the range
  * @new_end_pfn: The new PFN of the range
  *
  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept at the end physical page range that has already been
- * registered with add_active_range(). This function allows an arch to shrink
- * an existing registered range.
+ * The map is kept near the end physical page range that has already been
+ * registered. This function allows an arch to shrink an existing registered
+ * range.
  */
-void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-						unsigned long new_end_pfn)
+void __init shrink_active_range(unsigned int nid, unsigned long new_end_pfn)
 {
-	int i;
+	int i, j;
+	int removed = 0;
 
 	/* Find the old active region end and shrink */
-	for_each_active_range_index_in_nid(i, nid)
-		if (early_node_map[i].end_pfn == old_end_pfn) {
+	for_each_active_range_index_in_nid(i, nid) {
+		if (early_node_map[i].start_pfn >= new_end_pfn) {
+			/* clear it */
+			early_node_map[i].end_pfn = 0;
+			removed = 1;
+			continue;
+		}
+		if (early_node_map[i].end_pfn > new_end_pfn) {
 			early_node_map[i].end_pfn = new_end_pfn;
-			break;
+			continue;
 		}
+	}
+
+	if (!removed)
+		return;
+
+	/* remove the blank ones */
+	for (i = nr_nodemap_entries - 1; i > 0; i--) {
+		if (early_node_map[i].nid != nid)
+			continue;
+		if (early_node_map[i].end_pfn)
+			continue;
+		/* we found it, get rid of it */
+		for (j = i; j < nr_nodemap_entries - 1; j++)
+			memcpy(&early_node_map[j], &early_node_map[j+1],
+				sizeof(early_node_map[j]));
+		j = nr_nodemap_entries - 1;
+		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
+		nr_nodemap_entries--;
+	}
 }
 
 /**