diff options
author | Andy Whitcroft <apw@shadowen.org> | 2006-12-06 23:33:03 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-07 11:39:23 -0500 |
commit | 25ba77c141dbcd2602dd0171824d0d72aa023a01 (patch) | |
tree | 153eb9bc567f63d739dcaf8a3caf11c8f48b8379 /mm | |
parent | bc4ba393c007248f76c05945abb7b7b892cdd1cc (diff) |
[PATCH] numa node ids are int, page_to_nid and zone_to_nid should return int
NUMA node ids are passed as either int or unsigned int almost exclusively.
page_to_nid and zone_to_nid both return unsigned long. This is a throwback
to when page_to_nid was a #define and was thus exposing the real type
of the page flags field.
In addition to fixing up the definitions of page_to_nid and zone_to_nid I
audited the users of these functions identifying the following incorrect
uses:
1) mm/page_alloc.c show_node() -- printk dumping the node id,
2) include/asm-ia64/pgalloc.h pgtable_quicklist_free() -- comparison
against numa_node_id() which returns an int from cpu_to_node(), and
3) mm/mempolicy.c check_pte_range -- used as an index in node_isset which
uses bit_set which in generic code takes an int.
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/mempolicy.c | 2 | ||||
-rw-r--r-- | mm/page_alloc.c | 2 | ||||
-rw-r--r-- | mm/sparse.c | 2 |
3 files changed, 3 insertions, 3 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index fb907236bbd8..e7b69c90cfd6 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -221,7 +221,7 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
221 | orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 221 | orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
222 | do { | 222 | do { |
223 | struct page *page; | 223 | struct page *page; |
224 | unsigned int nid; | 224 | int nid; |
225 | 225 | ||
226 | if (!pte_present(*pte)) | 226 | if (!pte_present(*pte)) |
227 | continue; | 227 | continue; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 86f2984f8b79..614d427854a8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1407,7 +1407,7 @@ unsigned int nr_free_pagecache_pages(void) | |||
1407 | static inline void show_node(struct zone *zone) | 1407 | static inline void show_node(struct zone *zone) |
1408 | { | 1408 | { |
1409 | if (NUMA_BUILD) | 1409 | if (NUMA_BUILD) |
1410 | printk("Node %ld ", zone_to_nid(zone)); | 1410 | printk("Node %d ", zone_to_nid(zone)); |
1411 | } | 1411 | } |
1412 | 1412 | ||
1413 | void si_meminfo(struct sysinfo *val) | 1413 | void si_meminfo(struct sysinfo *val) |
diff --git a/mm/sparse.c b/mm/sparse.c index 158d6a2a5263..ac26eb0d73cd 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -36,7 +36,7 @@ static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned; | |||
36 | static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned; | 36 | static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned; |
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | unsigned long page_to_nid(struct page *page) | 39 | int page_to_nid(struct page *page) |
40 | { | 40 | { |
41 | return section_to_node_table[page_to_section(page)]; | 41 | return section_to_node_table[page_to_section(page)]; |
42 | } | 42 | } |