aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>2006-06-27 05:53:36 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-06-27 20:32:36 -0400
commit2842f11419704f8707fffc82e10d2263427fc130 (patch)
tree9bc86163ba7b2fa842b3aff2f087be6419f246bb
parent0a54703904a4a206686b4e8c3f5a6927b60747aa (diff)
[PATCH] catch valid mem range at onlining memory
This patch allows hot-add memory which is not aligned to section. Now, hot-added memory has to be aligned to section size. Considering big section sized archs, this is not useful. When hot-added memory is registered as iomem resource by iomem resource patch, we can make use of that information to detect valid memory range. Note: With this, not-aligned memory can be registered. To allow hot-add memory with holes, we have to do more work around add_memory(). (It doesn't allow adding memory to an already existing mem section.) Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--include/linux/ioport.h3
-rw-r--r--kernel/resource.c38
-rw-r--r--mm/memory_hotplug.c28
3 files changed, 65 insertions, 4 deletions
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index cd6bd001ba4e..edfc733b1575 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -105,6 +105,9 @@ extern int allocate_resource(struct resource *root, struct resource *new,
105int adjust_resource(struct resource *res, unsigned long start, 105int adjust_resource(struct resource *res, unsigned long start,
106 unsigned long size); 106 unsigned long size);
107 107
108/* get registered SYSTEM_RAM resources in specified area */
109extern int find_next_system_ram(struct resource *res);
110
108/* Convenience shorthand with allocation */ 111/* Convenience shorthand with allocation */
109#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) 112#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name))
110#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name)) 113#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name))
diff --git a/kernel/resource.c b/kernel/resource.c
index e3080fcc66a3..2404f9b0bc47 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -232,6 +232,44 @@ int release_resource(struct resource *old)
232 232
233EXPORT_SYMBOL(release_resource); 233EXPORT_SYMBOL(release_resource);
234 234
235#ifdef CONFIG_MEMORY_HOTPLUG
236/*
237 * Finds the lowest memory reosurce exists within [res->start.res->end)
238 * the caller must specify res->start, res->end, res->flags.
239 * If found, returns 0, res is overwritten, if not found, returns -1.
240 */
241int find_next_system_ram(struct resource *res)
242{
243 resource_size_t start, end;
244 struct resource *p;
245
246 BUG_ON(!res);
247
248 start = res->start;
249 end = res->end;
250
251 read_lock(&resource_lock);
252 for (p = iomem_resource.child; p ; p = p->sibling) {
253 /* system ram is just marked as IORESOURCE_MEM */
254 if (p->flags != res->flags)
255 continue;
256 if (p->start > end) {
257 p = NULL;
258 break;
259 }
260 if (p->start >= start)
261 break;
262 }
263 read_unlock(&resource_lock);
264 if (!p)
265 return -1;
266 /* copy data */
267 res->start = p->start;
268 res->end = p->end;
269 return 0;
270}
271#endif
272
235/* 273/*
236 * Find empty slot in the resource tree given range and alignment. 274 * Find empty slot in the resource tree given range and alignment.
237 */ 275 */
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0b11a8543441..f13783e81eb6 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -127,6 +127,9 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
127 unsigned long i; 127 unsigned long i;
128 unsigned long flags; 128 unsigned long flags;
129 unsigned long onlined_pages = 0; 129 unsigned long onlined_pages = 0;
130 struct resource res;
131 u64 section_end;
132 unsigned long start_pfn;
130 struct zone *zone; 133 struct zone *zone;
131 int need_zonelists_rebuild = 0; 134 int need_zonelists_rebuild = 0;
132 135
@@ -149,10 +152,27 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
149 if (!populated_zone(zone)) 152 if (!populated_zone(zone))
150 need_zonelists_rebuild = 1; 153 need_zonelists_rebuild = 1;
151 154
152 for (i = 0; i < nr_pages; i++) { 155 res.start = (u64)pfn << PAGE_SHIFT;
153 struct page *page = pfn_to_page(pfn + i); 156 res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
154 online_page(page); 157 res.flags = IORESOURCE_MEM; /* we just need system ram */
155 onlined_pages++; 158 section_end = res.end;
159
160 while (find_next_system_ram(&res) >= 0) {
161 start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
162 nr_pages = (unsigned long)
163 ((res.end + 1 - res.start) >> PAGE_SHIFT);
164
165 if (PageReserved(pfn_to_page(start_pfn))) {
166 /* this region's page is not onlined now */
167 for (i = 0; i < nr_pages; i++) {
168 struct page *page = pfn_to_page(start_pfn + i);
169 online_page(page);
170 onlined_pages++;
171 }
172 }
173
174 res.start = res.end + 1;
175 res.end = section_end;
156 } 176 }
157 zone->present_pages += onlined_pages; 177 zone->present_pages += onlined_pages;
158 zone->zone_pgdat->node_present_pages += onlined_pages; 178 zone->zone_pgdat->node_present_pages += onlined_pages;