aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>2007-10-16 04:26:10 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-10-16 12:43:01 -0400
commit75884fb1c6388f3713ddcca662f3647b3129aaeb (patch)
tree7debdd89fd94d099de3d3763b47af00ef6359d9d /mm
parent48f13bf3e742fca8aab87f6c39451d03bf5952d4 (diff)
memory unplug: memory hotplug cleanup
A cleanup patch for the "scanning memory resource [start, end)" operation. Currently, the find_next_system_ram() function is used in memory hotplug, but this interface is not easy to use and the code is complicated. This patch adds a walk_memory_resource(start, len, arg, func) function. The function 'func' is called for each valid memory resource range in [start, start+len). [pbadari@us.ibm.com: Error handling in walk_memory_resource()] Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Signed-off-by: Badari Pulavarty <pbadari@us.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memory_hotplug.c45
1 files changed, 19 insertions, 26 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9c12ae5e369..1cbe9579e23 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -161,14 +161,27 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
161 pgdat->node_start_pfn; 161 pgdat->node_start_pfn;
162} 162}
163 163
164int online_pages(unsigned long pfn, unsigned long nr_pages) 164static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
165 void *arg)
165{ 166{
166 unsigned long i; 167 unsigned long i;
168 unsigned long onlined_pages = *(unsigned long *)arg;
169 struct page *page;
170 if (PageReserved(pfn_to_page(start_pfn)))
171 for (i = 0; i < nr_pages; i++) {
172 page = pfn_to_page(start_pfn + i);
173 online_page(page);
174 onlined_pages++;
175 }
176 *(unsigned long *)arg = onlined_pages;
177 return 0;
178}
179
180
181int online_pages(unsigned long pfn, unsigned long nr_pages)
182{
167 unsigned long flags; 183 unsigned long flags;
168 unsigned long onlined_pages = 0; 184 unsigned long onlined_pages = 0;
169 struct resource res;
170 u64 section_end;
171 unsigned long start_pfn;
172 struct zone *zone; 185 struct zone *zone;
173 int need_zonelists_rebuild = 0; 186 int need_zonelists_rebuild = 0;
174 187
@@ -191,28 +204,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
191 if (!populated_zone(zone)) 204 if (!populated_zone(zone))
192 need_zonelists_rebuild = 1; 205 need_zonelists_rebuild = 1;
193 206
194 res.start = (u64)pfn << PAGE_SHIFT; 207 walk_memory_resource(pfn, nr_pages, &onlined_pages,
195 res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1; 208 online_pages_range);
196 res.flags = IORESOURCE_MEM; /* we just need system ram */
197 section_end = res.end;
198
199 while ((res.start < res.end) && (find_next_system_ram(&res) >= 0)) {
200 start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
201 nr_pages = (unsigned long)
202 ((res.end + 1 - res.start) >> PAGE_SHIFT);
203
204 if (PageReserved(pfn_to_page(start_pfn))) {
205 /* this region's page is not onlined now */
206 for (i = 0; i < nr_pages; i++) {
207 struct page *page = pfn_to_page(start_pfn + i);
208 online_page(page);
209 onlined_pages++;
210 }
211 }
212
213 res.start = res.end + 1;
214 res.end = section_end;
215 }
216 zone->present_pages += onlined_pages; 209 zone->present_pages += onlined_pages;
217 zone->zone_pgdat->node_present_pages += onlined_pages; 210 zone->zone_pgdat->node_present_pages += onlined_pages;
218 211