Diffstat (limited to 'mm')

 mm/page_alloc.c | 29 +++++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eee5ba7509c1..d80e1868e570 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3552,30 +3552,47 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 }
 
 /**
- * shrink_active_range - Shrink an existing registered range of PFNs
+ * remove_active_range - Shrink an existing registered range of PFNs
  * @nid: The node id the range is on that should be shrunk
- * @new_end_pfn: The new PFN of the range
+ * @start_pfn: The new PFN of the range
+ * @end_pfn: The new PFN of the range
  *
  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
  * The map is kept near the end physical page range that has already been
  * registered. This function allows an arch to shrink an existing registered
  * range.
  */
-void __init shrink_active_range(unsigned int nid, unsigned long new_end_pfn)
+void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
+				unsigned long end_pfn)
 {
 	int i, j;
 	int removed = 0;
 
+	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
+			nid, start_pfn, end_pfn);
+
 	/* Find the old active region end and shrink */
 	for_each_active_range_index_in_nid(i, nid) {
-		if (early_node_map[i].start_pfn >= new_end_pfn) {
+		if (early_node_map[i].start_pfn >= start_pfn &&
+		    early_node_map[i].end_pfn <= end_pfn) {
 			/* clear it */
+			early_node_map[i].start_pfn = 0;
 			early_node_map[i].end_pfn = 0;
 			removed = 1;
 			continue;
 		}
-		if (early_node_map[i].end_pfn > new_end_pfn) {
-			early_node_map[i].end_pfn = new_end_pfn;
+		if (early_node_map[i].start_pfn < start_pfn &&
+		    early_node_map[i].end_pfn > start_pfn) {
+			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
+			early_node_map[i].end_pfn = start_pfn;
+			if (temp_end_pfn > end_pfn)
+				add_active_range(nid, end_pfn, temp_end_pfn);
+			continue;
+		}
+		if (early_node_map[i].start_pfn >= start_pfn &&
+		    early_node_map[i].end_pfn > end_pfn &&
+		    early_node_map[i].start_pfn < end_pfn) {
+			early_node_map[i].start_pfn = end_pfn;
 			continue;
 		}
 	}
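
The rewritten loop distinguishes three ways a registered range can overlap the span being removed: an entry that lies entirely inside [start_pfn, end_pfn) is cleared; an entry that starts before start_pfn is truncated at start_pfn, and if it also runs past end_pfn the surviving tail is re-registered with add_active_range(), which punches a hole in the middle of the old range; an entry that starts inside the span but ends beyond it simply has its start advanced to end_pfn. The following is a minimal userspace sketch of that trimming logic, not kernel code: struct pfn_range, NR_RANGES, add_range() and remove_range() are hypothetical stand-ins for early_node_map[], add_active_range() and the new remove_active_range().

#include <stdio.h>

/* Stand-in for one early_node_map[] entry; an entry is treated as
 * empty when start_pfn == end_pfn, mirroring how the patch clears it. */
struct pfn_range {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

#define NR_RANGES 8

static struct pfn_range map[NR_RANGES];
static int nr_used;

/* Model of add_active_range(): just append a new entry. */
static void add_range(unsigned long start, unsigned long end)
{
	if (nr_used < NR_RANGES) {
		map[nr_used].start_pfn = start;
		map[nr_used].end_pfn = end;
		nr_used++;
	}
}

/* Model of the three overlap cases in remove_active_range(). */
static void remove_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_used; i++) {
		/* Case 1: entry entirely inside the removed span: clear it. */
		if (map[i].start_pfn >= start_pfn && map[i].end_pfn <= end_pfn) {
			map[i].start_pfn = 0;
			map[i].end_pfn = 0;
			continue;
		}
		/* Case 2: entry starts before the span: cut it off at start_pfn,
		 * and if it also runs past end_pfn, re-add the surviving tail. */
		if (map[i].start_pfn < start_pfn && map[i].end_pfn > start_pfn) {
			unsigned long old_end = map[i].end_pfn;

			map[i].end_pfn = start_pfn;
			if (old_end > end_pfn)
				add_range(end_pfn, old_end);
			continue;
		}
		/* Case 3: entry starts inside the span but ends past it:
		 * advance its start to end_pfn. */
		if (map[i].start_pfn >= start_pfn && map[i].end_pfn > end_pfn &&
		    map[i].start_pfn < end_pfn) {
			map[i].start_pfn = end_pfn;
			continue;
		}
	}
}

int main(void)
{
	int i;

	add_range(100, 200);	/* one registered range: PFNs [100, 200) */
	remove_range(120, 150);	/* punch a hole in the middle */

	/* Expect [100, 120) plus the re-added tail [150, 200). */
	for (i = 0; i < nr_used; i++)
		if (map[i].start_pfn != map[i].end_pfn)
			printf("[%lu, %lu)\n", map[i].start_pfn, map[i].end_pfn);
	return 0;
}

Note that clearing an entry to 0/0 instead of compacting the array matches what the hunk does inside the loop; the removed flag and the otherwise unused j in this hunk suggest that emptied slots are compacted later in the function, outside the lines shown here. A tail re-added by case 2 is revisited by the loop but matches none of the three conditions, so it is left intact.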