aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorYinghai Lu <yhlu.kernel@gmail.com>2008-06-08 22:39:16 -0400
committerIngo Molnar <mingo@elte.hu>2008-06-10 05:31:44 -0400
commitcc1a9d86ce989083703c4bdc11b75a87e1cc404a (patch)
treeee2b58df708b291f4a20311508cb26438647f82f
parentdb3660c1905293b91653e07f7857576df71ebf28 (diff)
mm, x86: shrink_active_range() should check all
Now we are using register_e820_active_regions() instead of add_active_range() directly, so the end_pfn recorded in early_node_map may differ from node_end_pfn. Therefore we need to make shrink_active_range() smarter: instead of matching a single range by its old end PFN, it must check all of the node's registered ranges against the new end PFN, trimming or removing each as needed. shrink_active_range() is a generic MM function in mm/page_alloc.c but it is only used on 32-bit x86. Should we move it back to some file in arch/x86? Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--arch/x86/mm/discontig_32.c2
-rw-r--r--include/linux/mm.h3
-rw-r--r--mm/page_alloc.c44
3 files changed, 36 insertions, 13 deletions
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index a89ccf3d4c14..489605bab85a 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -282,7 +282,7 @@ static unsigned long calculate_numa_remap_pages(void)
282 282
283 node_end_pfn[nid] -= size; 283 node_end_pfn[nid] -= size;
284 node_remap_start_pfn[nid] = node_end_pfn[nid]; 284 node_remap_start_pfn[nid] = node_end_pfn[nid];
285 shrink_active_range(nid, old_end_pfn, node_end_pfn[nid]); 285 shrink_active_range(nid, node_end_pfn[nid]);
286 } 286 }
287 printk("Reserving total of %ld pages for numa KVA remap\n", 287 printk("Reserving total of %ld pages for numa KVA remap\n",
288 reserve_pages); 288 reserve_pages);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c31a9cd2a30e..7cbd949f2516 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -997,8 +997,7 @@ extern void free_area_init_node(int nid, pg_data_t *pgdat,
997extern void free_area_init_nodes(unsigned long *max_zone_pfn); 997extern void free_area_init_nodes(unsigned long *max_zone_pfn);
998extern void add_active_range(unsigned int nid, unsigned long start_pfn, 998extern void add_active_range(unsigned int nid, unsigned long start_pfn,
999 unsigned long end_pfn); 999 unsigned long end_pfn);
1000extern void shrink_active_range(unsigned int nid, unsigned long old_end_pfn, 1000extern void shrink_active_range(unsigned int nid, unsigned long new_end_pfn);
1001 unsigned long new_end_pfn);
1002extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn, 1001extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
1003 unsigned long end_pfn); 1002 unsigned long end_pfn);
1004extern void remove_all_active_ranges(void); 1003extern void remove_all_active_ranges(void);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 502223c3c2c6..215408684076 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3579,25 +3579,49 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3579/** 3579/**
3580 * shrink_active_range - Shrink an existing registered range of PFNs 3580 * shrink_active_range - Shrink an existing registered range of PFNs
3581 * @nid: The node id the range is on that should be shrunk 3581 * @nid: The node id the range is on that should be shrunk
3582 * @old_end_pfn: The old end PFN of the range
3583 * @new_end_pfn: The new PFN of the range 3582 * @new_end_pfn: The new PFN of the range
3584 * 3583 *
3585 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node. 3584 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
3586 * The map is kept at the end physical page range that has already been 3585 * The map is kept near the end physical page range that has already been
3587 * registered with add_active_range(). This function allows an arch to shrink 3586 * registered. This function allows an arch to shrink an existing registered
3588 * an existing registered range. 3587 * range.
3589 */ 3588 */
3590void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn, 3589void __init shrink_active_range(unsigned int nid, unsigned long new_end_pfn)
3591 unsigned long new_end_pfn)
3592{ 3590{
3593 int i; 3591 int i, j;
3592 int removed = 0;
3594 3593
3595 /* Find the old active region end and shrink */ 3594 /* Find the old active region end and shrink */
3596 for_each_active_range_index_in_nid(i, nid) 3595 for_each_active_range_index_in_nid(i, nid) {
3597 if (early_node_map[i].end_pfn == old_end_pfn) { 3596 if (early_node_map[i].start_pfn >= new_end_pfn) {
3597 /* clear it */
3598 early_node_map[i].end_pfn = 0;
3599 removed = 1;
3600 continue;
3601 }
3602 if (early_node_map[i].end_pfn > new_end_pfn) {
3598 early_node_map[i].end_pfn = new_end_pfn; 3603 early_node_map[i].end_pfn = new_end_pfn;
3599 break; 3604 continue;
3600 } 3605 }
3606 }
3607
3608 if (!removed)
3609 return;
3610
3611 /* remove the blank ones */
3612 for (i = nr_nodemap_entries - 1; i > 0; i--) {
3613 if (early_node_map[i].nid != nid)
3614 continue;
3615 if (early_node_map[i].end_pfn)
3616 continue;
3617 /* we found it, get rid of it */
3618 for (j = i; j < nr_nodemap_entries - 1; j++)
3619 memcpy(&early_node_map[j], &early_node_map[j+1],
3620 sizeof(early_node_map[j]));
3621 j = nr_nodemap_entries - 1;
3622 memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
3623 nr_nodemap_entries--;
3624 }
3601} 3625}
3602 3626
3603/** 3627/**