author		Christoph Lameter <clameter@sgi.com>	2007-10-16 04:25:36 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:42:58 -0400
commit		37c0708dbee5825df3bd9ce6ef2199c6c1713970 (patch)
tree		747551aa58484e7f872da118b864c8f3ca6e892d /mm
parent		56bbd65df0e92a4a8eb70c5f2b416ae2b6c5fb31 (diff)
Memoryless nodes: Add N_CPU node state
Zone reclaim needs a check for whether a node has a CPU: remote zone
reclaim is not allowed on a node that has CPUs of its own.
[Lee.Schermerhorn@hp.com: Move setup of N_CPU node state mask]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Tested-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Bob Picco <bob.picco@hp.com>
Cc: Nishanth Aravamudan <nacc@us.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@skynet.ie>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	5
-rw-r--r--	mm/vmscan.c	4
2 files changed, 5 insertions, 4 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 07dfd89992fa..161bcb711b30 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2353,6 +2353,9 @@ static struct per_cpu_pageset boot_pageset[NR_CPUS];
 static int __cpuinit process_zones(int cpu)
 {
 	struct zone *zone, *dzone;
+	int node = cpu_to_node(cpu);
+
+	node_set_state(node, N_CPU);	/* this node has a cpu */
 
 	for_each_zone(zone) {
 
@@ -2360,7 +2363,7 @@ static int __cpuinit process_zones(int cpu)
 			continue;
 
 		zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
-					 GFP_KERNEL, cpu_to_node(cpu));
+					 GFP_KERNEL, node);
 		if (!zone_pcp(zone, cpu))
 			goto bad;
 
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 876568847b71..8fd8ba1c67b4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1853,7 +1853,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
-	cpumask_t mask;
 	int node_id;
 
 	/*
@@ -1890,8 +1889,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * as wide as possible.
 	 */
 	node_id = zone_to_nid(zone);
-	mask = node_to_cpumask(node_id);
-	if (!cpus_empty(mask) && node_id != numa_node_id())
+	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
 		return 0;
 	return __zone_reclaim(zone, gfp_mask, order);
 }
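
To make the intent concrete, here is a small userspace sketch of the idea behind the two hunks; it is not kernel code. A per-node "has a CPU" mask is set when a CPU's pagesets are initialized, and zone reclaim then skips remote nodes that appear in that mask. The names mark_node_has_cpu(), node_has_cpu() and may_zone_reclaim() are illustrative stand-ins for node_set_state(node, N_CPU), node_state(node, N_CPU) and the check added to zone_reclaim() in the diff above.

/*
 * Toy model of the N_CPU logic in this patch, not kernel code.
 * The bitmask below stands in for node_states[N_CPU].
 */
#include <stdio.h>
#include <stdbool.h>

static unsigned long nodes_with_cpu;	/* models node_states[N_CPU] */

/* models node_set_state(node, N_CPU), done when a cpu's pagesets are set up */
static void mark_node_has_cpu(int node)
{
	nodes_with_cpu |= 1UL << node;
}

/* models node_state(node, N_CPU) */
static bool node_has_cpu(int node)
{
	return nodes_with_cpu & (1UL << node);
}

/*
 * Models the new zone_reclaim() check: a remote node that has its own
 * CPUs is left to reclaim itself; only the local node, or a remote node
 * without CPUs (e.g. a memory-only node), may be reclaimed here.
 */
static bool may_zone_reclaim(int zone_node, int local_node)
{
	if (node_has_cpu(zone_node) && zone_node != local_node)
		return false;	/* remote node with CPUs: do not reclaim it */
	return true;
}

int main(void)
{
	mark_node_has_cpu(0);	/* e.g. all CPUs sit on node 0 */

	/* node 1 has memory but no CPUs: reclaim from node 0 is allowed */
	printf("node 1 from node 0: %d\n", may_zone_reclaim(1, 0));
	/* node 0 is the local node: reclaim is allowed */
	printf("node 0 from node 0: %d\n", may_zone_reclaim(0, 0));
	return 0;
}

Before this patch the same decision required building a full cpumask with node_to_cpumask() and testing cpus_empty(); the N_CPU node state makes it a single bit test and drops the on-stack cpumask_t.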