Diffstat (limited to 'mm/page_alloc.c')

 -rw-r--r--   mm/page_alloc.c   50
 1 file changed, 25 insertions(+), 25 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b5468de49869..a8c003e7b3d5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2051,8 +2051,8 @@ int __init early_pfn_to_nid(unsigned long pfn)
 
 /**
  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
- * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed
- * @max_low_pfn: The highest PFN that till be passed to free_bootmem_node
+ * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
+ * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
  *
  * If an architecture guarantees that all ranges registered with
  * add_active_ranges() contain no holes and may be freed, this
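For orientation, here is a minimal sketch (not part of the patch) of how boot code on an architecture using CONFIG_ARCH_POPULATES_NODE_MAP might call this helper once all ranges have been registered via add_active_range(); the choice of MAX_NUMNODES and max_low_pfn is illustrative:

    #include <linux/bootmem.h>
    #include <linux/mm.h>

    static void __init example_free_boot_memory(void)
    {
            /* Hand every registered range on every node back to the boot
             * allocator, clamped to lowmem via max_low_pfn. */
            free_bootmem_with_active_regions(MAX_NUMNODES, max_low_pfn);
    }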
@@ -2082,11 +2082,11 @@ void __init free_bootmem_with_active_regions(int nid,
 
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
- * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used
+ * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
  *
  * If an architecture guarantees that all ranges registered with
  * add_active_ranges() contain no holes and may be freed, this
- * this function may be used instead of calling memory_present() manually.
+ * function may be used instead of calling memory_present() manually.
  */
 void __init sparse_memory_present_with_active_regions(int nid)
 {
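A similarly hedged sketch of the SPARSEMEM case this comment covers; example_sparse_setup() is a made-up wrapper and assumes the arch has already registered its ranges:

    #include <linux/mm.h>
    #include <linux/mmzone.h>

    static void __init example_sparse_setup(void)
    {
            /* Mark every registered range present, then build the sparse
             * mem_section structures. */
            sparse_memory_present_with_active_regions(MAX_NUMNODES);
            sparse_init();
    }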
@@ -2156,14 +2156,14 @@ static void __init account_node_boundary(unsigned int nid,
 
 /**
  * get_pfn_range_for_nid - Return the start and end page frames for a node
- * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned
- * @start_pfn: Passed by reference. On return, it will have the node start_pfn
- * @end_pfn: Passed by reference. On return, it will have the node end_pfn
+ * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
+ * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
+ * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
  *
  * It returns the start and end page frame of a node based on information
  * provided by an arch calling add_active_range(). If called for a node
  * with no available memory, a warning is printed and the start and end
- * PFNs will be 0
+ * PFNs will be 0.
  */
 void __init get_pfn_range_for_nid(unsigned int nid,
 			unsigned long *start_pfn, unsigned long *end_pfn)
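A small usage sketch for the by-reference interface documented above; node 0 and the debug printk are just for illustration:

    #include <linux/kernel.h>
    #include <linux/mm.h>

    static void __init example_report_node_span(void)
    {
            unsigned long start_pfn, end_pfn;

            get_pfn_range_for_nid(0, &start_pfn, &end_pfn);
            /* Both are 0 (and a warning is printed) if node 0 has no
             * registered memory. */
            printk(KERN_DEBUG "node 0 spans PFNs %lu-%lu\n", start_pfn, end_pfn);
    }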
@@ -2216,7 +2216,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
 
 /*
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
- * then all holes in the requested range will be accounted for
+ * then all holes in the requested range will be accounted for.
  */
 unsigned long __init __absent_pages_in_range(int nid,
 				unsigned long range_start_pfn,
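As a sketch only: the double-underscore variant counts just the holes that intersect ranges registered for a particular node, which is how a per-node zone size calculation might use it; the wrapper below is hypothetical and would live alongside __absent_pages_in_range() in mm/page_alloc.c:

    static unsigned long __init example_node_zone_holes(int nid,
                            unsigned long zone_start_pfn,
                            unsigned long zone_end_pfn)
    {
            /* Pass MAX_NUMNODES instead of a real nid to count holes
             * in the range regardless of which node they belong to. */
            return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
    }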
@@ -2269,7 +2269,7 @@ unsigned long __init __absent_pages_in_range(int nid,
  * @start_pfn: The start PFN to start searching for holes
  * @end_pfn: The end PFN to stop searching for holes
  *
- * It returns the number of pages frames in memory holes within a range
+ * It returns the number of pages frames in memory holes within a range.
  */
 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
 							unsigned long end_pfn)
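A sketch of the kind of bookkeeping an arch might do with this public wrapper; the [0, max_low_pfn) bounds are illustrative:

    #include <linux/bootmem.h>
    #include <linux/mm.h>

    static unsigned long __init example_lowmem_present_pages(void)
    {
            /* Pages in [0, max_low_pfn) not covered by any registered
             * range are holes and do not contribute present pages. */
            return max_low_pfn - absent_pages_in_range(0, max_low_pfn);
    }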
@@ -2583,11 +2583,12 @@ void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
 
 /**
  * remove_all_active_ranges - Remove all currently registered regions
+ *
  * During discovery, it may be found that a table like SRAT is invalid
  * and an alternative discovery method must be used. This function removes
  * all currently registered regions.
  */
-void __init remove_all_active_ranges()
+void __init remove_all_active_ranges(void)
 {
 	memset(early_node_map, 0, sizeof(early_node_map));
 	nr_nodemap_entries = 0;
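The fallback flow the comment describes, as a hedged sketch; srat_is_invalid is a hypothetical flag and the single re-registered range is purely illustrative:

    #include <linux/bootmem.h>
    #include <linux/mm.h>

    static void __init example_numa_fallback(int srat_is_invalid)
    {
            if (!srat_is_invalid)
                    return;

            /* Drop everything registered from the bad table... */
            remove_all_active_ranges();

            /* ...and re-register ranges from an alternative source,
             * e.g. the firmware memory map (one flat range shown here). */
            add_active_range(0, 0, max_low_pfn);
    }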
@@ -2637,7 +2638,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
  * find_min_pfn_with_active_regions - Find the minimum PFN registered
  *
  * It returns the minimum PFN based on information provided via
- * add_active_range()
+ * add_active_range().
  */
 unsigned long __init find_min_pfn_with_active_regions(void)
 {
@@ -2648,7 +2649,7 @@ unsigned long __init find_min_pfn_with_active_regions(void)
  * find_max_pfn_with_active_regions - Find the maximum PFN registered
  *
  * It returns the maximum PFN based on information provided via
- * add_active_range()
+ * add_active_range().
  */
 unsigned long __init find_max_pfn_with_active_regions(void)
 {
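Taken together with find_min_pfn_with_active_regions() in the previous hunk, a quick sketch of how the pair reports the overall span of registered memory; the debug printk is only for illustration:

    #include <linux/kernel.h>
    #include <linux/mm.h>

    static void __init example_report_memory_span(void)
    {
            unsigned long min_pfn = find_min_pfn_with_active_regions();
            unsigned long max_pfn = find_max_pfn_with_active_regions();

            printk(KERN_DEBUG "registered memory spans PFNs %lu-%lu\n",
                   min_pfn, max_pfn);
    }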
@@ -2663,10 +2664,7 @@ unsigned long __init find_max_pfn_with_active_regions(void)
 
 /**
  * free_area_init_nodes - Initialise all pg_data_t and zone data
- * @arch_max_dma_pfn: The maximum PFN usable for ZONE_DMA
- * @arch_max_dma32_pfn: The maximum PFN usable for ZONE_DMA32
- * @arch_max_low_pfn: The maximum PFN usable for ZONE_NORMAL
- * @arch_max_high_pfn: The maximum PFN usable for ZONE_HIGHMEM
+ * @max_zone_pfn: an array of max PFNs for each zone
  *
  * This will call free_area_init_node() for each active node in the system.
  * Using the page ranges provided by add_active_range(), the size of each
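The replacement @max_zone_pfn parameter is an array indexed by zone. A sketch of how an arch's zone-sizing code might fill it in under CONFIG_ARCH_POPULATES_NODE_MAP; the function name and the max_dma_pfn/max_high_pfn arguments are illustrative, not real arch symbols:

    #include <linux/bootmem.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    static void __init example_zone_sizes_init(unsigned long max_dma_pfn,
                                               unsigned long max_high_pfn)
    {
            unsigned long max_zone_pfns[MAX_NR_ZONES];

            memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
            max_zone_pfns[ZONE_DMA]     = max_dma_pfn;
            max_zone_pfns[ZONE_NORMAL]  = max_low_pfn;
    #ifdef CONFIG_HIGHMEM
            max_zone_pfns[ZONE_HIGHMEM] = max_high_pfn;
    #endif
            /* Zones the arch does not use simply stay 0; this one array
             * replaces the four per-zone PFN arguments documented on the
             * old comment lines removed above. */
            free_area_init_nodes(max_zone_pfns);
    }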
@@ -2724,14 +2722,15 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
 /**
- * set_dma_reserve - Account the specified number of pages reserved in ZONE_DMA
- * @new_dma_reserve - The number of pages to mark reserved
+ * set_dma_reserve - set the specified number of pages reserved in the first zone
+ * @new_dma_reserve: The number of pages to mark reserved
  *
  * The per-cpu batchsize and zone watermarks are determined by present_pages.
  * In the DMA zone, a significant percentage may be consumed by kernel image
  * and other unfreeable allocations which can skew the watermarks badly. This
- * function may optionally be used to account for unfreeable pages in
- * ZONE_DMA. The effect will be lower watermarks and smaller per-cpu batchsize
+ * function may optionally be used to account for unfreeable pages in the
+ * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
+ * smaller per-cpu batchsize.
  */
 void __init set_dma_reserve(unsigned long new_dma_reserve)
 {
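A sketch of how an arch might account for an unfreeable chunk at the bottom of memory; kernel_reserved_bytes is a made-up name and the rounding is only illustrative:

    #include <linux/mm.h>

    static void __init example_account_dma_reserve(unsigned long kernel_reserved_bytes)
    {
            /* Convert the unfreeable bytes in the first zone to pages and
             * let the allocator subtract them before computing watermarks
             * and per-cpu batch sizes. */
            set_dma_reserve((kernel_reserved_bytes + PAGE_SIZE - 1) >> PAGE_SHIFT);
    }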
@@ -2844,10 +2843,11 @@ static void setup_per_zone_lowmem_reserve(void)
 	calculate_totalreserve_pages();
 }
 
-/*
- * setup_per_zone_pages_min - called when min_free_kbytes changes. Ensures
- * that the pages_{min,low,high} values for each zone are set correctly
- * with respect to min_free_kbytes.
+/**
+ * setup_per_zone_pages_min - called when min_free_kbytes changes.
+ *
+ * Ensures that the pages_{min,low,high} values for each zone are set correctly
+ * with respect to min_free_kbytes.
  */
 void setup_per_zone_pages_min(void)
 {
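Finally, a hedged sketch of when this now-kerneldoc'd function is typically invoked: after min_free_kbytes changes (for instance from a sysctl handler), the per-zone watermarks are re-derived from it; the wrapper name is hypothetical:

    #include <linux/mm.h>

    void example_after_min_free_kbytes_change(void)
    {
            /* Recompute pages_{min,low,high} for every zone from the
             * current value of min_free_kbytes. */
            setup_per_zone_pages_min();
    }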