diff options
author | Christoph Lameter <clameter@sgi.com> | 2006-09-26 02:31:55 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-09-26 11:48:52 -0400 |
commit | 89fa30242facca249aead2aac03c4c69764f911c (patch) | |
tree | 1ac46b4777b819f2a4793d8e37330576ae5089ec | |
parent | 4415cc8df630b05d3a54267d5f3e5c0b63a4ec05 (diff) |
[PATCH] NUMA: Add zone_to_nid function
There are many places where we need to determine the node of a zone.
Currently we use a difficult-to-read sequence of pointer dereferencing.
Put that into an inline function and use it throughout the VM. Maybe we can
find a way to optimize the lookup in the future.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | arch/i386/mm/discontig.c | 2 | ||||
-rw-r--r-- | arch/parisc/mm/init.c | 2 | ||||
-rw-r--r-- | include/linux/mm.h | 7 | ||||
-rw-r--r-- | kernel/cpuset.c | 4 | ||||
-rw-r--r-- | mm/hugetlb.c | 2 | ||||
-rw-r--r-- | mm/mempolicy.c | 6 | ||||
-rw-r--r-- | mm/oom_kill.c | 3 | ||||
-rw-r--r-- | mm/page_alloc.c | 2 | ||||
-rw-r--r-- | mm/vmscan.c | 2 |
9 files changed, 17 insertions, 13 deletions
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c index 07c300f93764..fb5d8b747de4 100644 --- a/arch/i386/mm/discontig.c +++ b/arch/i386/mm/discontig.c | |||
@@ -422,7 +422,7 @@ void __init set_highmem_pages_init(int bad_ppro) | |||
422 | zone_end_pfn = zone_start_pfn + zone->spanned_pages; | 422 | zone_end_pfn = zone_start_pfn + zone->spanned_pages; |
423 | 423 | ||
424 | printk("Initializing %s for node %d (%08lx:%08lx)\n", | 424 | printk("Initializing %s for node %d (%08lx:%08lx)\n", |
425 | zone->name, zone->zone_pgdat->node_id, | 425 | zone->name, zone_to_nid(zone), |
426 | zone_start_pfn, zone_end_pfn); | 426 | zone_start_pfn, zone_end_pfn); |
427 | 427 | ||
428 | for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) { | 428 | for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) { |
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index c7329615ef94..25ad28d63e88 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
@@ -551,7 +551,7 @@ void show_mem(void) | |||
551 | 551 | ||
552 | printk("Zone list for zone %d on node %d: ", j, i); | 552 | printk("Zone list for zone %d on node %d: ", j, i); |
553 | for (k = 0; zl->zones[k] != NULL; k++) | 553 | for (k = 0; zl->zones[k] != NULL; k++) |
554 | printk("[%d/%s] ", zl->zones[k]->zone_pgdat->node_id, zl->zones[k]->name); | 554 | printk("[%d/%s] ", zone_to_nid(zl->zones[k]), zl->zones[k]->name); |
555 | printk("\n"); | 555 | printk("\n"); |
556 | } | 556 | } |
557 | } | 557 | } |
diff --git a/include/linux/mm.h b/include/linux/mm.h index f2018775b995..856f0ee7e84a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -499,12 +499,17 @@ static inline struct zone *page_zone(struct page *page) | |||
499 | return zone_table[page_zone_id(page)]; | 499 | return zone_table[page_zone_id(page)]; |
500 | } | 500 | } |
501 | 501 | ||
502 | static inline unsigned long zone_to_nid(struct zone *zone) | ||
503 | { | ||
504 | return zone->zone_pgdat->node_id; | ||
505 | } | ||
506 | |||
502 | static inline unsigned long page_to_nid(struct page *page) | 507 | static inline unsigned long page_to_nid(struct page *page) |
503 | { | 508 | { |
504 | if (FLAGS_HAS_NODE) | 509 | if (FLAGS_HAS_NODE) |
505 | return (page->flags >> NODES_PGSHIFT) & NODES_MASK; | 510 | return (page->flags >> NODES_PGSHIFT) & NODES_MASK; |
506 | else | 511 | else |
507 | return page_zone(page)->zone_pgdat->node_id; | 512 | return zone_to_nid(page_zone(page)); |
508 | } | 513 | } |
509 | static inline unsigned long page_to_section(struct page *page) | 514 | static inline unsigned long page_to_section(struct page *page) |
510 | { | 515 | { |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 76940361273e..cff41511269f 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -2245,7 +2245,7 @@ int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl) | |||
2245 | int i; | 2245 | int i; |
2246 | 2246 | ||
2247 | for (i = 0; zl->zones[i]; i++) { | 2247 | for (i = 0; zl->zones[i]; i++) { |
2248 | int nid = zl->zones[i]->zone_pgdat->node_id; | 2248 | int nid = zone_to_nid(zl->zones[i]); |
2249 | 2249 | ||
2250 | if (node_isset(nid, current->mems_allowed)) | 2250 | if (node_isset(nid, current->mems_allowed)) |
2251 | return 1; | 2251 | return 1; |
@@ -2318,7 +2318,7 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) | |||
2318 | 2318 | ||
2319 | if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) | 2319 | if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) |
2320 | return 1; | 2320 | return 1; |
2321 | node = z->zone_pgdat->node_id; | 2321 | node = zone_to_nid(z); |
2322 | might_sleep_if(!(gfp_mask & __GFP_HARDWALL)); | 2322 | might_sleep_if(!(gfp_mask & __GFP_HARDWALL)); |
2323 | if (node_isset(node, current->mems_allowed)) | 2323 | if (node_isset(node, current->mems_allowed)) |
2324 | return 1; | 2324 | return 1; |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3aceadce1a76..7c7d03dbf73d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -72,7 +72,7 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma, | |||
72 | struct zone **z; | 72 | struct zone **z; |
73 | 73 | ||
74 | for (z = zonelist->zones; *z; z++) { | 74 | for (z = zonelist->zones; *z; z++) { |
75 | nid = (*z)->zone_pgdat->node_id; | 75 | nid = zone_to_nid(*z); |
76 | if (cpuset_zone_allowed(*z, GFP_HIGHUSER) && | 76 | if (cpuset_zone_allowed(*z, GFP_HIGHUSER) && |
77 | !list_empty(&hugepage_freelists[nid])) | 77 | !list_empty(&hugepage_freelists[nid])) |
78 | break; | 78 | break; |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 8002e1faccda..38f89650bc84 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -487,7 +487,7 @@ static void get_zonemask(struct mempolicy *p, nodemask_t *nodes) | |||
487 | switch (p->policy) { | 487 | switch (p->policy) { |
488 | case MPOL_BIND: | 488 | case MPOL_BIND: |
489 | for (i = 0; p->v.zonelist->zones[i]; i++) | 489 | for (i = 0; p->v.zonelist->zones[i]; i++) |
490 | node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id, | 490 | node_set(zone_to_nid(p->v.zonelist->zones[i]), |
491 | *nodes); | 491 | *nodes); |
492 | break; | 492 | break; |
493 | case MPOL_DEFAULT: | 493 | case MPOL_DEFAULT: |
@@ -1145,7 +1145,7 @@ unsigned slab_node(struct mempolicy *policy) | |||
1145 | * Follow bind policy behavior and start allocation at the | 1145 | * Follow bind policy behavior and start allocation at the |
1146 | * first node. | 1146 | * first node. |
1147 | */ | 1147 | */ |
1148 | return policy->v.zonelist->zones[0]->zone_pgdat->node_id; | 1148 | return zone_to_nid(policy->v.zonelist->zones[0]); |
1149 | 1149 | ||
1150 | case MPOL_PREFERRED: | 1150 | case MPOL_PREFERRED: |
1151 | if (policy->v.preferred_node >= 0) | 1151 | if (policy->v.preferred_node >= 0) |
@@ -1649,7 +1649,7 @@ void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) | |||
1649 | 1649 | ||
1650 | nodes_clear(nodes); | 1650 | nodes_clear(nodes); |
1651 | for (z = pol->v.zonelist->zones; *z; z++) | 1651 | for (z = pol->v.zonelist->zones; *z; z++) |
1652 | node_set((*z)->zone_pgdat->node_id, nodes); | 1652 | node_set(zone_to_nid(*z), nodes); |
1653 | nodes_remap(tmp, nodes, *mpolmask, *newmask); | 1653 | nodes_remap(tmp, nodes, *mpolmask, *newmask); |
1654 | nodes = tmp; | 1654 | nodes = tmp; |
1655 | 1655 | ||
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index f1c0ef1fd21f..bada3d03119f 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -177,8 +177,7 @@ static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask) | |||
177 | 177 | ||
178 | for (z = zonelist->zones; *z; z++) | 178 | for (z = zonelist->zones; *z; z++) |
179 | if (cpuset_zone_allowed(*z, gfp_mask)) | 179 | if (cpuset_zone_allowed(*z, gfp_mask)) |
180 | node_clear((*z)->zone_pgdat->node_id, | 180 | node_clear(zone_to_nid(*z), nodes); |
181 | nodes); | ||
182 | else | 181 | else |
183 | return CONSTRAINT_CPUSET; | 182 | return CONSTRAINT_CPUSET; |
184 | 183 | ||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index cf913bdd433e..51070b6d593f 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1217,7 +1217,7 @@ unsigned int nr_free_pagecache_pages(void) | |||
1217 | #ifdef CONFIG_NUMA | 1217 | #ifdef CONFIG_NUMA |
1218 | static void show_node(struct zone *zone) | 1218 | static void show_node(struct zone *zone) |
1219 | { | 1219 | { |
1220 | printk("Node %d ", zone->zone_pgdat->node_id); | 1220 | printk("Node %ld ", zone_to_nid(zone)); |
1221 | } | 1221 | } |
1222 | #else | 1222 | #else |
1223 | #define show_node(zone) do { } while (0) | 1223 | #define show_node(zone) do { } while (0) |
diff --git a/mm/vmscan.c b/mm/vmscan.c index b950f193816e..87779dda4ec6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1661,7 +1661,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) | |||
1661 | * over remote processors and spread off node memory allocations | 1661 | * over remote processors and spread off node memory allocations |
1662 | * as wide as possible. | 1662 | * as wide as possible. |
1663 | */ | 1663 | */ |
1664 | node_id = zone->zone_pgdat->node_id; | 1664 | node_id = zone_to_nid(zone); |
1665 | mask = node_to_cpumask(node_id); | 1665 | mask = node_to_cpumask(node_id); |
1666 | if (!cpus_empty(mask) && node_id != numa_node_id()) | 1666 | if (!cpus_empty(mask) && node_id != numa_node_id()) |
1667 | return 0; | 1667 | return 0; |