author     Yinghai Lu <yhlu.kernel@gmail.com>   2008-06-08 22:39:16 -0400
committer  Ingo Molnar <mingo@elte.hu>          2008-06-10 05:31:44 -0400
commit     cc1a9d86ce989083703c4bdc11b75a87e1cc404a (patch)
tree       ee2b58df708b291f4a20311508cb26438647f82f /mm/page_alloc.c
parent     db3660c1905293b91653e07f7857576df71ebf28 (diff)
mm, x86: shrink_active_range() should check all
Now we are using register_e820_active_regions() instead of calling
add_active_range() directly, so the end_pfn recorded in early_node_map
can differ from node_end_pfn. shrink_active_range() therefore needs to
be smarter: instead of matching a single entry by its exact old end, it
must check all of the node's registered ranges against the new end,
truncating any range that straddles it and clearing any range that lies
entirely above it.
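[Editor's note: a minimal userspace sketch, not from this commit, of the
failure mode the new check avoids. All PFN values are invented for
illustration.]

/*
 * Standalone sketch, not kernel code: why the old exact-match test can
 * miss. With register_e820_active_regions() the node's map entry may
 * end below node_end_pfn (e.g. because of an e820 hole), so a caller
 * passing node_end_pfn as old_end_pfn never matches.
 */
#include <stdio.h>

struct range { unsigned long start_pfn, end_pfn; };

int main(void)
{
	struct range map = { 0x00, 0x9f };	/* ends below node_end_pfn */
	unsigned long old_end_pfn = 0x100;	/* node_end_pfn, per old API */
	unsigned long new_end_pfn = 0x80;

	if (map.end_pfn == old_end_pfn)		/* old test: never true here */
		map.end_pfn = new_end_pfn;
	printf("old logic: end_pfn = %#lx (not shrunk)\n", map.end_pfn);

	if (map.end_pfn > new_end_pfn)		/* new test: compares the */
		map.end_pfn = new_end_pfn;	/* range to the new end */
	printf("new logic: end_pfn = %#lx\n", map.end_pfn);
	return 0;
}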
shrink_active_range() is a generic MM function in mm/page_alloc.c but
it is only used on 32-bit x86. Should we move it back to some file in
arch/x86?
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 44 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 34 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 502223c3c2c6..215408684076 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3579,25 +3579,49 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 /**
  * shrink_active_range - Shrink an existing registered range of PFNs
  * @nid: The node id the range is on that should be shrunk
- * @old_end_pfn: The old end PFN of the range
  * @new_end_pfn: The new PFN of the range
  *
  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept at the end physical page range that has already been
- * registered with add_active_range(). This function allows an arch to shrink
- * an existing registered range.
+ * The map is kept near the end physical page range that has already been
+ * registered. This function allows an arch to shrink an existing registered
+ * range.
  */
-void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-						unsigned long new_end_pfn)
+void __init shrink_active_range(unsigned int nid, unsigned long new_end_pfn)
 {
-	int i;
+	int i, j;
+	int removed = 0;
 
 	/* Find the old active region end and shrink */
-	for_each_active_range_index_in_nid(i, nid)
-		if (early_node_map[i].end_pfn == old_end_pfn) {
+	for_each_active_range_index_in_nid(i, nid) {
+		if (early_node_map[i].start_pfn >= new_end_pfn) {
+			/* clear it */
+			early_node_map[i].end_pfn = 0;
+			removed = 1;
+			continue;
+		}
+		if (early_node_map[i].end_pfn > new_end_pfn) {
 			early_node_map[i].end_pfn = new_end_pfn;
-			break;
+			continue;
 		}
+	}
+
+	if (!removed)
+		return;
+
+	/* remove the blank ones */
+	for (i = nr_nodemap_entries - 1; i > 0; i--) {
+		if (early_node_map[i].nid != nid)
+			continue;
+		if (early_node_map[i].end_pfn)
+			continue;
+		/* we found it, get rid of it */
+		for (j = i; j < nr_nodemap_entries - 1; j++)
+			memcpy(&early_node_map[j], &early_node_map[j+1],
+				sizeof(early_node_map[j]));
+		j = nr_nodemap_entries - 1;
+		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
+		nr_nodemap_entries--;
+	}
 }
 
 /**
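[Editor's note: for readers who want to see the new two-pass logic end
to end, here is a self-contained userspace model of the patched
function. struct node_active_region is simplified, the
for_each_active_range_index_in_nid() iterator is replaced by a plain
loop with an explicit nid check, and the sample map entries are
invented; only the truncate/blank/compact flow mirrors the patch.]

/*
 * Self-contained model (userspace, not kernel code) of the new
 * shrink_active_range() flow: truncate straddling ranges, blank ranges
 * that start at or above the new end, then compact the blanks away.
 */
#include <stdio.h>
#include <string.h>

struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};

static struct node_active_region early_node_map[4] = {
	{ 0,   100, 0 },	/* ends below new end: untouched */
	{ 100, 300, 0 },	/* straddles new end: truncated */
	{ 300, 400, 0 },	/* starts above new end: blanked, removed */
};
static int nr_nodemap_entries = 3;

static void shrink_active_range(int nid, unsigned long new_end_pfn)
{
	int i, j;
	int removed = 0;

	/* Pass 1: visit every range of the node, not just an exact match. */
	for (i = 0; i < nr_nodemap_entries; i++) {
		if (early_node_map[i].nid != nid)
			continue;
		if (early_node_map[i].start_pfn >= new_end_pfn) {
			early_node_map[i].end_pfn = 0;	/* mark as blank */
			removed = 1;
			continue;
		}
		if (early_node_map[i].end_pfn > new_end_pfn)
			early_node_map[i].end_pfn = new_end_pfn;
	}

	if (!removed)
		return;

	/* Pass 2: squeeze the blanked entries out of the map. */
	for (i = nr_nodemap_entries - 1; i > 0; i--) {
		if (early_node_map[i].nid != nid)
			continue;
		if (early_node_map[i].end_pfn)
			continue;
		for (j = i; j < nr_nodemap_entries - 1; j++)
			early_node_map[j] = early_node_map[j + 1];
		memset(&early_node_map[nr_nodemap_entries - 1], 0,
		       sizeof(early_node_map[0]));
		nr_nodemap_entries--;
	}
}

int main(void)
{
	int i;

	shrink_active_range(0, 200);
	/* Prints [0, 100) and [100, 200): the third entry is gone. */
	for (i = 0; i < nr_nodemap_entries; i++)
		printf("[%lu, %lu) nid=%d\n",
		       early_node_map[i].start_pfn,
		       early_node_map[i].end_pfn,
		       early_node_map[i].nid);
	return 0;
}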