author    Lai Jiangshan <laijs@cn.fujitsu.com>    2012-12-12 16:51:46 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-12-12 20:38:33 -0500
commit    4b0ef1fe8a626f0ba7f649764f979d0dc9eab86b (patch)
tree      e66e7d6523ff4243755c13dce9c1fe6952991bba    /mm/page_alloc.c
parent    48fb2e240c4275c6ba4f53c9397f5fd6f350c3a7 (diff)
page_alloc: use N_MEMORY instead of N_HIGH_MEMORY and change the node_states initialization
N_HIGH_MEMORY stands for the nodes that have normal or high memory.
N_MEMORY stands for the nodes that have any memory.  The code here needs
to handle the nodes which have memory, so we should use N_MEMORY instead.

Since N_MEMORY was introduced, we also update the initialization of
node_states.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Lin Feng <linfeng@cn.fujitsu.com>
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
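For readers unfamiliar with the node_states masks, here is a minimal user-space sketch of the distinction the patch relies on. It is an illustration only, not kernel code: the enum values and the node_set_state()/node_state() helpers below merely mimic the kernel's names with a plain bitmask, and the movable-only node is a hypothetical example of a node that has memory but no normal or high memory.

/*
 * Minimal user-space sketch of the node_states idea (illustrative model,
 * not kernel code).  Each state is modelled as a plain bitmask of node ids.
 */
#include <stdio.h>

enum node_states {
        N_NORMAL_MEMORY,        /* node has regular (lowmem) memory */
        N_HIGH_MEMORY,          /* node has normal or high memory */
        N_MEMORY,               /* node has any memory at all */
        NR_NODE_STATES
};

static unsigned long node_masks[NR_NODE_STATES];

static void node_set_state(int nid, enum node_states state)
{
        node_masks[state] |= 1UL << nid;
}

static int node_state(int nid, enum node_states state)
{
        return !!(node_masks[state] & (1UL << nid));
}

int main(void)
{
        /* Node 0: ordinary node with normal memory. */
        node_set_state(0, N_NORMAL_MEMORY);
        node_set_state(0, N_HIGH_MEMORY);
        node_set_state(0, N_MEMORY);

        /* Node 1: hypothetical node whose memory is all movable. */
        node_set_state(1, N_MEMORY);

        /* A walk over N_HIGH_MEMORY misses node 1; N_MEMORY sees both. */
        for (int nid = 0; nid < 2; nid++)
                printf("node %d: high=%d any=%d\n", nid,
                       node_state(nid, N_HIGH_MEMORY),
                       node_state(nid, N_MEMORY));
        return 0;
}

Running the sketch prints high=0 any=1 for node 1: that is exactly the class of node a loop over N_HIGH_MEMORY would skip, and why the callers below are converted to N_MEMORY.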
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--    mm/page_alloc.c    40
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4171cd4f8257..35727168896b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1695,7 +1695,7 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
  *
  * If the zonelist cache is present in the passed in zonelist, then
  * returns a pointer to the allowed node mask (either the current
- * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
+ * tasks mems_allowed, or node_states[N_MEMORY].)
  *
  * If the zonelist cache is not available for this zonelist, does
  * nothing and returns NULL.
@@ -1724,7 +1724,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
 
         allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
                                         &cpuset_current_mems_allowed :
-                                        &node_states[N_HIGH_MEMORY];
+                                        &node_states[N_MEMORY];
         return allowednodes;
 }
 
@@ -3238,7 +3238,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
                 return node;
         }
 
-        for_each_node_state(n, N_HIGH_MEMORY) {
+        for_each_node_state(n, N_MEMORY) {
 
                 /* Don't want a node to appear more than once */
                 if (node_isset(n, *used_node_mask))
@@ -3380,7 +3380,7 @@ static int default_zonelist_order(void)
          * local memory, NODE_ORDER may be suitable.
          */
         average_size = total_size /
-                                (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
+                                (nodes_weight(node_states[N_MEMORY]) + 1);
         for_each_online_node(nid) {
                 low_kmem_size = 0;
                 total_size = 0;
@@ -4731,7 +4731,7 @@ unsigned long __init find_min_pfn_with_active_regions(void)
 /*
  * early_calculate_totalpages()
  * Sum pages in active regions for movable zone.
- * Populate N_HIGH_MEMORY for calculating usable_nodes.
+ * Populate N_MEMORY for calculating usable_nodes.
  */
 static unsigned long __init early_calculate_totalpages(void)
 {
@@ -4744,7 +4744,7 @@ static unsigned long __init early_calculate_totalpages(void)
 
                 totalpages += pages;
                 if (pages)
-                        node_set_state(nid, N_HIGH_MEMORY);
+                        node_set_state(nid, N_MEMORY);
         }
         return totalpages;
 }
@@ -4761,9 +4761,9 @@ static void __init find_zone_movable_pfns_for_nodes(void)
         unsigned long usable_startpfn;
         unsigned long kernelcore_node, kernelcore_remaining;
         /* save the state before borrow the nodemask */
-        nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
+        nodemask_t saved_node_state = node_states[N_MEMORY];
         unsigned long totalpages = early_calculate_totalpages();
-        int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
+        int usable_nodes = nodes_weight(node_states[N_MEMORY]);
 
         /*
          * If movablecore was specified, calculate what size of
@@ -4798,7 +4798,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 restart:
         /* Spread kernelcore memory as evenly as possible throughout nodes */
         kernelcore_node = required_kernelcore / usable_nodes;
-        for_each_node_state(nid, N_HIGH_MEMORY) {
+        for_each_node_state(nid, N_MEMORY) {
                 unsigned long start_pfn, end_pfn;
 
                 /*
@@ -4890,23 +4890,27 @@ restart:
 
 out:
         /* restore the node_state */
-        node_states[N_HIGH_MEMORY] = saved_node_state;
+        node_states[N_MEMORY] = saved_node_state;
 }
 
-/* Any regular memory on that node ? */
-static void __init check_for_regular_memory(pg_data_t *pgdat)
+/* Any regular or high memory on that node ? */
+static void check_for_memory(pg_data_t *pgdat, int nid)
 {
-#ifdef CONFIG_HIGHMEM
         enum zone_type zone_type;
 
-        for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
+        if (N_MEMORY == N_NORMAL_MEMORY)
+                return;
+
+        for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
                 struct zone *zone = &pgdat->node_zones[zone_type];
                 if (zone->present_pages) {
-                        node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
+                        node_set_state(nid, N_HIGH_MEMORY);
+                        if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
+                            zone_type <= ZONE_NORMAL)
+                                node_set_state(nid, N_NORMAL_MEMORY);
                         break;
                 }
         }
-#endif
 }
 
 /**
4911 4915
4912/** 4916/**
@@ -4989,8 +4993,8 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 
                 /* Any memory on that node */
                 if (pgdat->node_present_pages)
-                        node_set_state(nid, N_HIGH_MEMORY);
-                check_for_regular_memory(pgdat);
+                        node_set_state(nid, N_MEMORY);
+                check_for_memory(pgdat, nid);
         }
 }
 
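The check_for_memory() hunk above carries the heart of the node_states re-initialization: the last hunk marks a node N_MEMORY as soon as it has any present pages, and check_for_memory() then scans the zones below ZONE_MOVABLE, marking N_HIGH_MEMORY for the first populated zone and additionally N_NORMAL_MEMORY when that zone is lowmem. The sketch below is a user-space model of that classification under an assumed four-zone layout with made-up page counts; it is not kernel code, and it skips the early return the kernel takes when N_MEMORY == N_NORMAL_MEMORY (no highmem, no movable nodes).

/*
 * Illustrative user-space model of the zone scan in check_for_memory()
 * (assumed zone layout, not kernel code).
 */
#include <stdio.h>

enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, ZONE_MOVABLE, MAX_NR_ZONES };

static void classify_node(const unsigned long present[MAX_NR_ZONES],
                          int *has_high, int *has_normal)
{
        *has_high = *has_normal = 0;
        /* Stop at the first populated zone below ZONE_MOVABLE. */
        for (enum zone_type zt = ZONE_DMA; zt <= ZONE_MOVABLE - 1; zt++) {
                if (present[zt]) {
                        *has_high = 1;                  /* would set N_HIGH_MEMORY */
                        if (zt <= ZONE_NORMAL)
                                *has_normal = 1;        /* would set N_NORMAL_MEMORY */
                        break;
                }
        }
}

int main(void)
{
        /* Hypothetical nodes: present pages per zone, indexed by zone_type. */
        unsigned long highmem_only[MAX_NR_ZONES] = { 0, 0, 4096, 0 };
        unsigned long movable_only[MAX_NR_ZONES] = { 0, 0, 0, 4096 };
        int high, normal;

        classify_node(highmem_only, &high, &normal);
        printf("highmem-only node: N_HIGH_MEMORY=%d N_NORMAL_MEMORY=%d\n", high, normal);

        classify_node(movable_only, &high, &normal);
        printf("movable-only node: N_HIGH_MEMORY=%d N_NORMAL_MEMORY=%d\n", high, normal);
        return 0;
}

A node populated only in ZONE_MOVABLE ends up with neither state here, which is why N_MEMORY is set separately from node_present_pages in the last hunk rather than inside check_for_memory().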