diff options
author | Yasunori Goto <y-goto@jp.fujitsu.com> | 2006-06-23 05:03:11 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-23 10:42:46 -0400 |
commit | 6811378e7d8b9aa4fca2a1ca73d24c9d67c9cb12 (patch) | |
tree | 37f2f5a2bf2e60848a571f8f43685c7406d7b238 /mm | |
parent | cca448fe92246fb59efe55ba2e048ded0971a9af (diff) |
[PATCH] wait_table and zonelist initializing for memory hotadd: update zonelists
In the current code, the zonelist is considered to be built once and never modified.
But memory hotplug can add a new zone/pgdat, so the zonelists must be updated.
This patch modifies build_all_zonelists() so that it can reconfigure each
pgdat's zonelists.
To update them safely, this patch uses stop_machine_run(), which ensures that
no other CPU touches the zonelists while they are being updated.
In the old version (V2 of node hot-add), the kernel updated the zonelists
right after zone initialization. But at that point present_pages of the new
zone was still 0, because online_page() had not been called yet.
build_zonelists() checks present_pages to find populated zones, so the update
happened too early. Therefore, the zonelist rebuild has been moved to after
online_pages().
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory_hotplug.c | 12 | ||||
-rw-r--r-- | mm/page_alloc.c | 26 |
2 files changed, 33 insertions, 5 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 71da5c98c9c1..1b1ac3db2187 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -127,6 +127,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) | |||
127 | unsigned long flags; | 127 | unsigned long flags; |
128 | unsigned long onlined_pages = 0; | 128 | unsigned long onlined_pages = 0; |
129 | struct zone *zone; | 129 | struct zone *zone; |
130 | int need_zonelists_rebuild = 0; | ||
130 | 131 | ||
131 | /* | 132 | /* |
132 | * This doesn't need a lock to do pfn_to_page(). | 133 | * This doesn't need a lock to do pfn_to_page(). |
@@ -139,6 +140,14 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) | |||
139 | grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages); | 140 | grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages); |
140 | pgdat_resize_unlock(zone->zone_pgdat, &flags); | 141 | pgdat_resize_unlock(zone->zone_pgdat, &flags); |
141 | 142 | ||
143 | /* | ||
144 | * If this zone is not populated, then it is not in zonelist. | ||
145 | * This means the page allocator ignores this zone. | ||
146 | * So, zonelist must be updated after online. | ||
147 | */ | ||
148 | if (!populated_zone(zone)) | ||
149 | need_zonelists_rebuild = 1; | ||
150 | |||
142 | for (i = 0; i < nr_pages; i++) { | 151 | for (i = 0; i < nr_pages; i++) { |
143 | struct page *page = pfn_to_page(pfn + i); | 152 | struct page *page = pfn_to_page(pfn + i); |
144 | online_page(page); | 153 | online_page(page); |
@@ -149,5 +158,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) | |||
149 | 158 | ||
150 | setup_per_zone_pages_min(); | 159 | setup_per_zone_pages_min(); |
151 | 160 | ||
161 | if (need_zonelists_rebuild) | ||
162 | build_all_zonelists(); | ||
163 | |||
152 | return 0; | 164 | return 0; |
153 | } | 165 | } |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 62564e27b448..9dfbe6b7d1c8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/nodemask.h> | 37 | #include <linux/nodemask.h> |
38 | #include <linux/vmalloc.h> | 38 | #include <linux/vmalloc.h> |
39 | #include <linux/mempolicy.h> | 39 | #include <linux/mempolicy.h> |
40 | #include <linux/stop_machine.h> | ||
40 | 41 | ||
41 | #include <asm/tlbflush.h> | 42 | #include <asm/tlbflush.h> |
42 | #include <asm/div64.h> | 43 | #include <asm/div64.h> |
@@ -1704,14 +1705,29 @@ static void __meminit build_zonelists(pg_data_t *pgdat) | |||
1704 | 1705 | ||
1705 | #endif /* CONFIG_NUMA */ | 1706 | #endif /* CONFIG_NUMA */ |
1706 | 1707 | ||
1707 | void __init build_all_zonelists(void) | 1708 | /* return values int ....just for stop_machine_run() */ |
1709 | static int __meminit __build_all_zonelists(void *dummy) | ||
1708 | { | 1710 | { |
1709 | int i; | 1711 | int nid; |
1712 | for_each_online_node(nid) | ||
1713 | build_zonelists(NODE_DATA(nid)); | ||
1714 | return 0; | ||
1715 | } | ||
1716 | |||
1717 | void __meminit build_all_zonelists(void) | ||
1718 | { | ||
1719 | if (system_state == SYSTEM_BOOTING) { | ||
1720 | __build_all_zonelists(0); | ||
1721 | cpuset_init_current_mems_allowed(); | ||
1722 | } else { | ||
1723 | /* we have to stop all cpus to guaranntee there is no user | ||
1724 | of zonelist */ | ||
1725 | stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); | ||
1726 | /* cpuset refresh routine should be here */ | ||
1727 | } | ||
1710 | 1728 | ||
1711 | for_each_online_node(i) | ||
1712 | build_zonelists(NODE_DATA(i)); | ||
1713 | printk("Built %i zonelists\n", num_online_nodes()); | 1729 | printk("Built %i zonelists\n", num_online_nodes()); |
1714 | cpuset_init_current_mems_allowed(); | 1730 | |
1715 | } | 1731 | } |
1716 | 1732 | ||
1717 | /* | 1733 | /* |