author    Yasunori Goto <y-goto@jp.fujitsu.com>    2006-06-23 05:03:10 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>   2006-06-23 10:42:46 -0400
commit    718127cc3170454f4aa274fdd2f1e01574fecd66 (patch)
tree      8b42a48248d6508b8369718deef6b1af3ea82dbf
parent    86356ab147669bd3bcb2149fd9561d1280835c24 (diff)
[PATCH] wait_table and zonelist initializing for memory hotadd: add return code for init_currently_empty_zone
When __add_zone() is called against an empty (not yet populated) zone, we have to initialize the zone, because it was not initialized at boot time. However, init_currently_empty_zone() may fail, since the allocation of its wait table can fail, so this patch catches and propagates its error code. The changes to the wait table itself are in the next patch.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--    include/linux/mmzone.h      3
-rw-r--r--    mm/memory_hotplug.c        15
-rw-r--r--    mm/page_alloc.c            11
3 files changed, 24 insertions, 5 deletions
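The error-propagation pattern the patch introduces can be sketched as a small, standalone C program. This is a hedged illustration only: the *_stub names are invented for this example and are not kernel APIs. It merely mirrors the shape of the __add_zone() change, where an unpopulated zone is initialized on demand and an allocation failure inside the initializer is returned to the hot-add caller instead of being dropped by a void return.

/*
 * Standalone sketch (not kernel code) of the on-demand-init plus
 * error-propagation pattern from this patch. The *_stub names are
 * invented for illustration.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct zone_stub {
	int populated;		/* stands in for populated_zone() */
	void *wait_table;	/* the allocation that can fail */
};

/* Mirrors init_currently_empty_zone(): it now returns an error code. */
static int init_currently_empty_zone_stub(struct zone_stub *z, size_t table_size)
{
	z->wait_table = malloc(table_size);
	if (!z->wait_table)
		return -ENOMEM;	/* failure is now visible to the caller */
	z->populated = 1;
	return 0;
}

/* Mirrors __add_zone(): initialize the zone on demand, propagate errors. */
static int add_zone_stub(struct zone_stub *z)
{
	if (!z->populated) {
		int ret = init_currently_empty_zone_stub(z, 4096);
		if (ret < 0)
			return ret;	/* bail out instead of continuing */
	}
	/* ... memmap / zonetable setup would follow here ... */
	return 0;
}

int main(void)
{
	struct zone_stub z = { 0 };
	int ret = add_zone_stub(&z);

	if (ret < 0)
		fprintf(stderr, "hot-add failed: %d\n", ret);
	else
		printf("zone initialized on demand\n");
	free(z.wait_table);
	return ret ? EXIT_FAILURE : EXIT_SUCCESS;
}

At boot time the same failure is instead treated as fatal, which is why free_area_init_core() wraps the call in BUG_ON(ret) in the page_alloc.c hunk below.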
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 652673ea92f1..e82fc1a52cd0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -333,6 +333,9 @@ void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
 
+extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
+				     unsigned long size);
+
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
 #else
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 70df5c0d957e..71da5c98c9c1 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -26,7 +26,7 @@
 
 extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
 	unsigned long size);
-static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
+static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	int nr_pages = PAGES_PER_SECTION;
@@ -34,8 +34,15 @@ static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 	int zone_type;
 
 	zone_type = zone - pgdat->node_zones;
+	if (!populated_zone(zone)) {
+		int ret = 0;
+		ret = init_currently_empty_zone(zone, phys_start_pfn, nr_pages);
+		if (ret < 0)
+			return ret;
+	}
 	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
 	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
+	return 0;
 }
 
 extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
@@ -50,7 +57,11 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 	if (ret < 0)
 		return ret;
 
-	__add_zone(zone, phys_start_pfn);
+	ret = __add_zone(zone, phys_start_pfn);
+
+	if (ret < 0)
+		return ret;
+
 	return register_new_memory(__pfn_to_section(phys_start_pfn));
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5ae75bead4df..4bc66f6b7718 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2050,8 +2050,9 @@ static __meminit void zone_pcp_init(struct zone *zone)
 		zone->name, zone->present_pages, batch);
 }
 
-static __meminit void init_currently_empty_zone(struct zone *zone,
-		unsigned long zone_start_pfn, unsigned long size)
+__meminit int init_currently_empty_zone(struct zone *zone,
+					unsigned long zone_start_pfn,
+					unsigned long size)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
 
@@ -2063,6 +2064,8 @@ static __meminit void init_currently_empty_zone(struct zone *zone,
 	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
 
 	zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+
+	return 0;
 }
 
 /*
@@ -2077,6 +2080,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 	unsigned long j;
 	int nid = pgdat->node_id;
 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
+	int ret;
 
 	pgdat_resize_init(pgdat);
 	pgdat->nr_zones = 0;
@@ -2118,7 +2122,8 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
 			continue;
 
 		zonetable_add(zone, nid, j, zone_start_pfn, size);
-		init_currently_empty_zone(zone, zone_start_pfn, size);
+		ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+		BUG_ON(ret);
 		zone_start_pfn += size;
 	}
 }