path: root/mm/page_alloc.c
author	Dave Hansen <haveblue@us.ibm.com>	2007-01-11 02:15:30 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2007-01-11 21:18:20 -0500
commit	a2f3aa02576632cdb60bd3de1f4bf55e9ac65604 (patch)
tree	2b9b73675de73866fbd219fab5bf2d804e6817b1 /mm/page_alloc.c
parent	47a4d5be7c50b2e9b905abbe2b97dc87051c5a44 (diff)
[PATCH] Fix sparsemem on Cell
Fix an oops experienced on the Cell architecture when init-time functions, early_*(), are called at runtime. The patch alters the call paths to make sure that the callers explicitly say whether the call is being made on behalf of a hotplug event or is happening at boot time.

It has been compile tested on ppc64, ia64, s390, i386 and x86_64.

Acked-by: Arnd Bergmann <arndb@de.ibm.com>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
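The mechanism is easiest to see in isolation: the patch introduces an enum memmap_context with MEMMAP_EARLY and MEMMAP_HOTPLUG values and gates the early_pfn_valid()/early_pfn_in_nid() checks on it, so those init-time-only helpers are never reached for hotplugged memory. The standalone userspace sketch below only mirrors that pattern under simplifying assumptions; init_range() and the stub_* helpers are placeholders invented for illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the two contexts introduced by the patch. */
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};

/* Hypothetical stand-ins for early_pfn_valid()/early_pfn_in_nid(). */
static bool stub_early_pfn_valid(unsigned long pfn)
{
	return pfn % 2 == 0;	/* pretend odd pfns are holes in mem_map[] */
}

static bool stub_early_pfn_in_nid(unsigned long pfn, int nid)
{
	(void)pfn;
	(void)nid;
	return true;
}

/* Simplified shape of memmap_init_zone(): hole checks only at boot time. */
static void init_range(unsigned long start_pfn, unsigned long size,
		       int nid, enum memmap_context context)
{
	for (unsigned long pfn = start_pfn; pfn < start_pfn + size; pfn++) {
		if (context == MEMMAP_EARLY) {
			if (!stub_early_pfn_valid(pfn))
				continue;
			if (!stub_early_pfn_in_nid(pfn, nid))
				continue;
		}
		printf("init pfn %lu\n", pfn);	/* stands in for struct page setup */
	}
}

int main(void)
{
	init_range(0, 4, 0, MEMMAP_EARLY);	/* boot path: may skip holes */
	init_range(4, 4, 0, MEMMAP_HOTPLUG);	/* hotplug path: no early_*() calls */
	return 0;
}

Running this shows the boot-time path skipping "holes" while the hotplug path initializes every pfn without ever touching the early_*() stubs, which is the property the real patch relies on to avoid running init-time code at runtime on Cell.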
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	25
1 file changed, 17 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a49f96b7ea43..fc5b5442e942 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1956,17 +1956,24 @@ static inline unsigned long wait_table_bits(unsigned long size)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-		unsigned long start_pfn)
+		unsigned long start_pfn, enum memmap_context context)
 {
 	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-		if (!early_pfn_valid(pfn))
-			continue;
-		if (!early_pfn_in_nid(pfn, nid))
-			continue;
+		/*
+		 * There can be holes in boot-time mem_map[]s
+		 * handed to this function.  They do not
+		 * exist on hotplugged memory.
+		 */
+		if (context == MEMMAP_EARLY) {
+			if (!early_pfn_valid(pfn))
+				continue;
+			if (!early_pfn_in_nid(pfn, nid))
+				continue;
+		}
 		page = pfn_to_page(pfn);
 		set_page_links(page, zone, nid, pfn);
 		init_page_count(page);
@@ -1993,7 +2000,7 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
 
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-	memmap_init_zone((size), (nid), (zone), (start_pfn))
+	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
 static int __cpuinit zone_batchsize(struct zone *zone)
@@ -2239,7 +2246,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
 
 __meminit int init_currently_empty_zone(struct zone *zone,
 					unsigned long zone_start_pfn,
-					unsigned long size)
+					unsigned long size,
+					enum memmap_context context)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int ret;
@@ -2683,7 +2691,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		if (!size)
 			continue;
 
-		ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+		ret = init_currently_empty_zone(zone, zone_start_pfn,
+						size, MEMMAP_EARLY);
 		BUG_ON(ret);
 		zone_start_pfn += size;
 	}
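Worth noting in the second hunk above: rather than touching every existing boot-time caller, the patch lets the memmap_init() wrapper macro supply MEMMAP_EARLY as a default, so only memmap_init_zone() itself grows the extra parameter. A standalone sketch of that default-argument-via-macro idiom follows; the *_sketch names are invented for illustration and are not kernel identifiers.

#include <stdio.h>

enum memmap_context { MEMMAP_EARLY, MEMMAP_HOTPLUG };

/* The underlying initializer now takes the extra context argument... */
static void memmap_init_zone_sketch(unsigned long size, int nid,
				    unsigned long zone, unsigned long start_pfn,
				    enum memmap_context context)
{
	printf("size=%lu nid=%d zone=%lu start=%lu context=%d\n",
	       size, nid, zone, start_pfn, context);
}

/*
 * ...while the wrapper macro pins it to MEMMAP_EARLY, so boot-time callers
 * keep using the old four-argument form unchanged.
 */
#define memmap_init_sketch(size, nid, zone, start_pfn) \
	memmap_init_zone_sketch((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)

int main(void)
{
	memmap_init_sketch(16UL, 0, 0UL, 0UL);	/* boot-time style call */
	return 0;
}

The hotplug case the changelog refers to is handled outside this file (and outside this diff), where callers bypass the macro and pass MEMMAP_HOTPLUG to the function directly.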