about summary refs log tree commit diff stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorLee Schermerhorn <Lee.Schermerhorn@hp.com>2007-10-16 04:25:39 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-10-16 12:42:59 -0400
commit37b07e4163f7306aa735a6e250e8d22293e5b8de (patch)
tree5c9c1935253a39aa840a9923bf1c86620cb6f733 /mm/page_alloc.c
parent0e1e7c7a739562a321fda07c7cd2a97a7114f8f8 (diff)
memoryless nodes: fixup uses of node_online_map in generic code
Here's a cut at fixing up uses of the online node map in generic code.

mm/shmem.c:shmem_parse_mpol()

	Ensure nodelist is subset of nodes with memory.
	Use node_states[N_HIGH_MEMORY] as default for missing
	nodelist for interleave policy.

mm/shmem.c:shmem_fill_super()

	initialize policy_nodes to node_states[N_HIGH_MEMORY]

mm/page-writeback.c:highmem_dirtyable_memory()

	sum over nodes with memory

mm/page_alloc.c:zlc_setup()

	allowednodes - use nodes with memory.

mm/page_alloc.c:default_zonelist_order()

	average over nodes with memory.

mm/page_alloc.c:find_next_best_node()

	skip nodes w/o memory.
	N_HIGH_MEMORY state mask may not be initialized at this time,
	unless we want to depend on early_calculate_totalpages() [see
	below].  Will ZONE_MOVABLE ever be configurable?

mm/page_alloc.c:find_zone_movable_pfns_for_nodes()

	spread kernelcore over nodes with memory.

	This required calling early_calculate_totalpages()
	unconditionally, and populating N_HIGH_MEMORY node
	state therein from nodes in the early_node_map[].
	If we can depend on this, we can eliminate the
	population of N_HIGH_MEMORY mask from __build_all_zonelists()
	and use the N_HIGH_MEMORY mask in find_next_best_node().

mm/mempolicy.c:mpol_check_policy()

	Ensure nodes specified for policy are subset of
	nodes with memory.

[akpm@linux-foundation.org: fix warnings]
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Christoph Lameter <clameter@sgi.com>
Cc: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c69
1 files changed, 39 insertions, 30 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2f547f45de18..e69f19e841e5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1040,7 +1040,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1040 * 1040 *
1041 * If the zonelist cache is present in the passed in zonelist, then 1041 * If the zonelist cache is present in the passed in zonelist, then
1042 * returns a pointer to the allowed node mask (either the current 1042 * returns a pointer to the allowed node mask (either the current
1043 * tasks mems_allowed, or node_online_map.) 1043 * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
1044 * 1044 *
1045 * If the zonelist cache is not available for this zonelist, does 1045 * If the zonelist cache is not available for this zonelist, does
1046 * nothing and returns NULL. 1046 * nothing and returns NULL.
@@ -1069,7 +1069,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1069 1069
1070 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ? 1070 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1071 &cpuset_current_mems_allowed : 1071 &cpuset_current_mems_allowed :
1072 &node_online_map; 1072 &node_states[N_HIGH_MEMORY];
1073 return allowednodes; 1073 return allowednodes;
1074} 1074}
1075 1075
@@ -1802,7 +1802,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
1802 return node; 1802 return node;
1803 } 1803 }
1804 1804
1805 for_each_online_node(n) { 1805 for_each_node_state(n, N_HIGH_MEMORY) {
1806 cpumask_t tmp; 1806 cpumask_t tmp;
1807 1807
1808 /* Don't want a node to appear more than once */ 1808 /* Don't want a node to appear more than once */
@@ -1939,7 +1939,8 @@ static int default_zonelist_order(void)
1939 * If there is a node whose DMA/DMA32 memory is very big area on 1939 * If there is a node whose DMA/DMA32 memory is very big area on
1940 * local memory, NODE_ORDER may be suitable. 1940 * local memory, NODE_ORDER may be suitable.
1941 */ 1941 */
1942 average_size = total_size / (num_online_nodes() + 1); 1942 average_size = total_size /
1943 (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
1943 for_each_online_node(nid) { 1944 for_each_online_node(nid) {
1944 low_kmem_size = 0; 1945 low_kmem_size = 0;
1945 total_size = 0; 1946 total_size = 0;
@@ -2098,20 +2099,6 @@ static void build_zonelist_cache(pg_data_t *pgdat)
2098 2099
2099#endif /* CONFIG_NUMA */ 2100#endif /* CONFIG_NUMA */
2100 2101
2101/* Any regular memory on that node ? */
2102static void check_for_regular_memory(pg_data_t *pgdat)
2103{
2104#ifdef CONFIG_HIGHMEM
2105 enum zone_type zone_type;
2106
2107 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
2108 struct zone *zone = &pgdat->node_zones[zone_type];
2109 if (zone->present_pages)
2110 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
2111 }
2112#endif
2113}
2114
2115/* return values int ....just for stop_machine_run() */ 2102/* return values int ....just for stop_machine_run() */
2116static int __build_all_zonelists(void *dummy) 2103static int __build_all_zonelists(void *dummy)
2117{ 2104{
@@ -2122,11 +2109,6 @@ static int __build_all_zonelists(void *dummy)
2122 2109
2123 build_zonelists(pgdat); 2110 build_zonelists(pgdat);
2124 build_zonelist_cache(pgdat); 2111 build_zonelist_cache(pgdat);
2125
2126 /* Any memory on that node */
2127 if (pgdat->node_present_pages)
2128 node_set_state(nid, N_HIGH_MEMORY);
2129 check_for_regular_memory(pgdat);
2130 } 2112 }
2131 return 0; 2113 return 0;
2132} 2114}
@@ -3282,16 +3264,24 @@ unsigned long __init find_max_pfn_with_active_regions(void)
3282 return max_pfn; 3264 return max_pfn;
3283} 3265}
3284 3266
3267/*
3268 * early_calculate_totalpages()
3269 * Sum pages in active regions for movable zone.
3270 * Populate N_HIGH_MEMORY for calculating usable_nodes.
3271 */
3285unsigned long __init early_calculate_totalpages(void) 3272unsigned long __init early_calculate_totalpages(void)
3286{ 3273{
3287 int i; 3274 int i;
3288 unsigned long totalpages = 0; 3275 unsigned long totalpages = 0;
3289 3276
3290 for (i = 0; i < nr_nodemap_entries; i++) 3277 for (i = 0; i < nr_nodemap_entries; i++) {
3291 totalpages += early_node_map[i].end_pfn - 3278 unsigned long pages = early_node_map[i].end_pfn -
3292 early_node_map[i].start_pfn; 3279 early_node_map[i].start_pfn;
3293 3280 totalpages += pages;
3294 return totalpages; 3281 if (pages)
3282 node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3283 }
3284 return totalpages;
3295} 3285}
3296 3286
3297/* 3287/*
@@ -3305,7 +3295,8 @@ void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3305 int i, nid; 3295 int i, nid;
3306 unsigned long usable_startpfn; 3296 unsigned long usable_startpfn;
3307 unsigned long kernelcore_node, kernelcore_remaining; 3297 unsigned long kernelcore_node, kernelcore_remaining;
3308 int usable_nodes = num_online_nodes(); 3298 unsigned long totalpages = early_calculate_totalpages();
3299 int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
3309 3300
3310 /* 3301 /*
3311 * If movablecore was specified, calculate what size of 3302 * If movablecore was specified, calculate what size of
@@ -3316,7 +3307,6 @@ void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3316 * what movablecore would have allowed. 3307 * what movablecore would have allowed.
3317 */ 3308 */
3318 if (required_movablecore) { 3309 if (required_movablecore) {
3319 unsigned long totalpages = early_calculate_totalpages();
3320 unsigned long corepages; 3310 unsigned long corepages;
3321 3311
3322 /* 3312 /*
@@ -3341,7 +3331,7 @@ void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3341restart: 3331restart:
3342 /* Spread kernelcore memory as evenly as possible throughout nodes */ 3332 /* Spread kernelcore memory as evenly as possible throughout nodes */
3343 kernelcore_node = required_kernelcore / usable_nodes; 3333 kernelcore_node = required_kernelcore / usable_nodes;
3344 for_each_online_node(nid) { 3334 for_each_node_state(nid, N_HIGH_MEMORY) {
3345 /* 3335 /*
3346 * Recalculate kernelcore_node if the division per node 3336 * Recalculate kernelcore_node if the division per node
3347 * now exceeds what is necessary to satisfy the requested 3337 * now exceeds what is necessary to satisfy the requested
@@ -3433,6 +3423,20 @@ restart:
3433 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 3423 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3434} 3424}
3435 3425
3426/* Any regular memory on that node ? */
3427static void check_for_regular_memory(pg_data_t *pgdat)
3428{
3429#ifdef CONFIG_HIGHMEM
3430 enum zone_type zone_type;
3431
3432 for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
3433 struct zone *zone = &pgdat->node_zones[zone_type];
3434 if (zone->present_pages)
3435 node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
3436 }
3437#endif
3438}
3439
3436/** 3440/**
3437 * free_area_init_nodes - Initialise all pg_data_t and zone data 3441 * free_area_init_nodes - Initialise all pg_data_t and zone data
3438 * @max_zone_pfn: an array of max PFNs for each zone 3442 * @max_zone_pfn: an array of max PFNs for each zone
@@ -3507,6 +3511,11 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3507 pg_data_t *pgdat = NODE_DATA(nid); 3511 pg_data_t *pgdat = NODE_DATA(nid);
3508 free_area_init_node(nid, pgdat, NULL, 3512 free_area_init_node(nid, pgdat, NULL,
3509 find_min_pfn_for_node(nid), NULL); 3513 find_min_pfn_for_node(nid), NULL);
3514
3515 /* Any memory on that node */
3516 if (pgdat->node_present_pages)
3517 node_set_state(nid, N_HIGH_MEMORY);
3518 check_for_regular_memory(pgdat);
3510 } 3519 }
3511} 3520}
3512 3521