author     Mel Gorman <mel@csn.ul.ie>                        2008-04-28 05:12:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2008-04-28 11:58:18 -0400
commit     18ea7e710d2452fa726814a406779188028cf1bf
tree       dfa439770b4344ade1ad8bd4fe70920ad66ee064
parent     0e88460da6ab7bb6a7ef83675412ed5b6315d741
mm: remember what the preferred zone is for zone_statistics
On NUMA, zone_statistics() is used to record events like numa hit, miss and
foreign.  It assumes that the first zone in a zonelist is the preferred zone.
When multiple zonelists are replaced by one that is filtered, this is no longer
the case.  This patch records what the preferred zone is rather than assuming
the first zone in the zonelist is it.  This simplifies the reading of later
patches in this set.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
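A minimal userspace sketch of the accounting rule the patch makes explicit:
an allocation counts as a hit when it is satisfied from the preferred zone's
node, otherwise as a miss on the zone actually used plus a foreign event
charged to the preferred zone; it additionally counts as local when the zone
used belongs to the allocating CPU's node.  The struct, function and counter
names below are illustrative stand-ins, not kernel code; the kernel keeps
these as per-zone vmstat counters (NUMA_HIT, NUMA_MISS, NUMA_FOREIGN,
NUMA_LOCAL), and globals are used here only to keep the example small.

/*
 * Illustrative userspace sketch only -- not kernel code.  Models the
 * hit/miss/foreign/local classification zone_statistics() applies once
 * the preferred zone is passed in explicitly.
 */
#include <stdio.h>

struct fake_zone {			/* hypothetical stand-in for struct zone */
	int node;			/* NUMA node the zone belongs to */
};

static unsigned long hits, misses, foreign, local;

static void account(struct fake_zone *preferred, struct fake_zone *used,
		    int allocating_node)
{
	if (used->node == preferred->node)
		hits++;			/* satisfied from the preferred node */
	else {
		misses++;		/* fell back to another node */
		foreign++;		/* charged against the preferred node */
	}
	if (used->node == allocating_node)
		local++;		/* memory is local to the caller's CPU */
}

int main(void)
{
	struct fake_zone n0 = { .node = 0 }, n1 = { .node = 1 };

	account(&n0, &n0, 0);		/* hit + local */
	account(&n0, &n1, 0);		/* miss + foreign, not local */
	printf("hit=%lu miss=%lu foreign=%lu local=%lu\n",
	       hits, misses, foreign, local);
	return 0;
}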
-rw-r--r--  include/linux/vmstat.h  2
-rw-r--r--  mm/page_alloc.c         9
-rw-r--r--  mm/vmstat.c             6
3 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 9f1b4b46151e..e726b6d46495 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -174,7 +174,7 @@ static inline unsigned long node_page_state(int node,
 		zone_page_state(&zones[ZONE_MOVABLE], item);
 }
 
-extern void zone_statistics(struct zonelist *, struct zone *);
+extern void zone_statistics(struct zone *, struct zone *);
 
 #else
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 63ff71830ea4..187efd47a446 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1050,7 +1050,7 @@ void split_page(struct page *page, unsigned int order)
  * we cheat by calling it from here, in the order > 0 path. Saves a branch
  * or two.
  */
-static struct page *buffered_rmqueue(struct zonelist *zonelist,
+static struct page *buffered_rmqueue(struct zone *preferred_zone,
 			struct zone *zone, int order, gfp_t gfp_flags)
 {
 	unsigned long flags;
@@ -1102,7 +1102,7 @@ again:
 	}
 
 	__count_zone_vm_events(PGALLOC, zone, 1 << order);
-	zone_statistics(zonelist, zone);
+	zone_statistics(preferred_zone, zone);
 	local_irq_restore(flags);
 	put_cpu();
 
@@ -1383,7 +1383,7 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
 	struct zone **z;
 	struct page *page = NULL;
 	int classzone_idx = zone_idx(zonelist->zones[0]);
-	struct zone *zone;
+	struct zone *zone, *preferred_zone;
 	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
 	int zlc_active = 0;		/* set if using zonelist_cache */
 	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
@@ -1395,6 +1395,7 @@ zonelist_scan:
 	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
 	 */
 	z = zonelist->zones;
+	preferred_zone = *z;
 
 	do {
 		/*
@@ -1433,7 +1434,7 @@ zonelist_scan:
 		}
 	}
 
-	page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
+	page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
 	if (page)
 		break;
 this_zone_full:
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7c7286e9506d..879bcc0a1d4c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -364,13 +364,13 @@ void refresh_cpu_vm_stats(int cpu)
  *
  * Must be called with interrupts disabled.
  */
-void zone_statistics(struct zonelist *zonelist, struct zone *z)
+void zone_statistics(struct zone *preferred_zone, struct zone *z)
 {
-	if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
+	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
 		__inc_zone_state(z, NUMA_HIT);
 	} else {
 		__inc_zone_state(z, NUMA_MISS);
-		__inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
+		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
 	}
 	if (z->node == numa_node_id())
 		__inc_zone_state(z, NUMA_LOCAL);