author    Christoph Lameter <clameter@sgi.com>   2006-09-26 02:31:55 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-09-26 11:48:52 -0400
commit    89fa30242facca249aead2aac03c4c69764f911c (patch)
tree      1ac46b4777b819f2a4793d8e37330576ae5089ec /mm
parent    4415cc8df630b05d3a54267d5f3e5c0b63a4ec05 (diff)
[PATCH] NUMA: Add zone_to_nid function
There are many places where we need to determine the node of a zone.  Currently we use a difficult-to-read sequence of pointer dereferences.  Put that into an inline function and use it throughout the VM.  Maybe we can find a way to optimize the lookup in the future.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
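The zone_to_nid() helper itself is added outside mm/ and so does not appear in the diff below, which is limited to 'mm'.  As a sketch only, assuming the inline simply wraps the zone->zone_pgdat->node_id dereference that every hunk below removes (the exact return type and header location may differ in the actual patch):

/*
 * Sketch of the accessor this patch introduces: hide the
 * zone -> pgdat -> node_id pointer chain behind one readable call.
 * Return type is an assumption; the in-tree definition may differ.
 */
static inline int zone_to_nid(struct zone *zone)
{
        return zone->zone_pgdat->node_id;
}

Callers then read as in the hunks below, e.g. mm/vmscan.c writes node_id = zone_to_nid(zone) instead of chaining through zone_pgdat by hand.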
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c    | 2
-rw-r--r--  mm/mempolicy.c  | 6
-rw-r--r--  mm/oom_kill.c   | 3
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  mm/vmscan.c     | 2
5 files changed, 7 insertions(+), 8 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3aceadce1a76..7c7d03dbf73d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -72,7 +72,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
         struct zone **z;
 
         for (z = zonelist->zones; *z; z++) {
-                nid = (*z)->zone_pgdat->node_id;
+                nid = zone_to_nid(*z);
                 if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
                     !list_empty(&hugepage_freelists[nid]))
                         break;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8002e1faccda..38f89650bc84 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -487,7 +487,7 @@ static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
         switch (p->policy) {
         case MPOL_BIND:
                 for (i = 0; p->v.zonelist->zones[i]; i++)
-                        node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id,
+                        node_set(zone_to_nid(p->v.zonelist->zones[i]),
                                 *nodes);
                 break;
         case MPOL_DEFAULT:
@@ -1145,7 +1145,7 @@ unsigned slab_node(struct mempolicy *policy)
                  * Follow bind policy behavior and start allocation at the
                  * first node.
                  */
-                return policy->v.zonelist->zones[0]->zone_pgdat->node_id;
+                return zone_to_nid(policy->v.zonelist->zones[0]);
 
         case MPOL_PREFERRED:
                 if (policy->v.preferred_node >= 0)
@@ -1649,7 +1649,7 @@ void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
 
                 nodes_clear(nodes);
                 for (z = pol->v.zonelist->zones; *z; z++)
-                        node_set((*z)->zone_pgdat->node_id, nodes);
+                        node_set(zone_to_nid(*z), nodes);
                 nodes_remap(tmp, nodes, *mpolmask, *newmask);
                 nodes = tmp;
 
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index f1c0ef1fd21f..bada3d03119f 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -177,8 +177,7 @@ static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask)
 
         for (z = zonelist->zones; *z; z++)
                 if (cpuset_zone_allowed(*z, gfp_mask))
-                        node_clear((*z)->zone_pgdat->node_id,
-                                        nodes);
+                        node_clear(zone_to_nid(*z), nodes);
                 else
                         return CONSTRAINT_CPUSET;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cf913bdd433e..51070b6d593f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1217,7 +1217,7 @@ unsigned int nr_free_pagecache_pages(void)
 #ifdef CONFIG_NUMA
 static void show_node(struct zone *zone)
 {
-        printk("Node %d ", zone->zone_pgdat->node_id);
+        printk("Node %ld ", zone_to_nid(zone));
 }
 #else
 #define show_node(zone) do { } while (0)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b950f193816e..87779dda4ec6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1661,7 +1661,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
          * over remote processors and spread off node memory allocations
          * as wide as possible.
          */
-        node_id = zone->zone_pgdat->node_id;
+        node_id = zone_to_nid(zone);
         mask = node_to_cpumask(node_id);
         if (!cpus_empty(mask) && node_id != numa_node_id())
                 return 0;