about summary refs log tree commit diff stats
path: root/arch/x86_64/kernel
diff options
context:
space:
mode:
authorMel Gorman <mel@csn.ul.ie>2006-09-27 04:49:52 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-09-27 11:26:11 -0400
commit5cb248abf5ab65ab543b2d5fc16c738b28031fc0 (patch)
treee9af2f7f86000e36f11f1091cb675c1738d69ca3 /arch/x86_64/kernel
parent4cfee88ad30acc47f02b8b7ba3db8556262dce1e (diff)
[PATCH] Have x86_64 use add_active_range() and free_area_init_nodes
Size zones and holes in an architecture independent manner for x86_64. Signed-off-by: Mel Gorman <mel@csn.ul.ie> Cc: Dave Hansen <haveblue@us.ibm.com> Cc: Andy Whitcroft <apw@shadowen.org> Cc: Andi Kleen <ak@muc.de> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Paul Mackerras <paulus@samba.org> Cc: "Keith Mannthey" <kmannth@gmail.com> Cc: "Luck, Tony" <tony.luck@intel.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Yasunori Goto <y-goto@jp.fujitsu.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--arch/x86_64/kernel/e820.c125
-rw-r--r--arch/x86_64/kernel/setup.c7
2 files changed, 51 insertions, 81 deletions
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index c0af3828df45..b3f0908668ec 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -162,59 +162,14 @@ unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsi
162 return -1UL; 162 return -1UL;
163} 163}
164 164
165/*
166 * Free bootmem based on the e820 table for a node.
167 */
168void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
169{
170 int i;
171 for (i = 0; i < e820.nr_map; i++) {
172 struct e820entry *ei = &e820.map[i];
173 unsigned long last, addr;
174
175 if (ei->type != E820_RAM ||
176 ei->addr+ei->size <= start ||
177 ei->addr >= end)
178 continue;
179
180 addr = round_up(ei->addr, PAGE_SIZE);
181 if (addr < start)
182 addr = start;
183
184 last = round_down(ei->addr + ei->size, PAGE_SIZE);
185 if (last >= end)
186 last = end;
187
188 if (last > addr && last-addr >= PAGE_SIZE)
189 free_bootmem_node(pgdat, addr, last-addr);
190 }
191}
192
193/* 165/*
194 * Find the highest page frame number we have available 166 * Find the highest page frame number we have available
195 */ 167 */
196unsigned long __init e820_end_of_ram(void) 168unsigned long __init e820_end_of_ram(void)
197{ 169{
198 int i;
199 unsigned long end_pfn = 0; 170 unsigned long end_pfn = 0;
171 end_pfn = find_max_pfn_with_active_regions();
200 172
201 for (i = 0; i < e820.nr_map; i++) {
202 struct e820entry *ei = &e820.map[i];
203 unsigned long start, end;
204
205 start = round_up(ei->addr, PAGE_SIZE);
206 end = round_down(ei->addr + ei->size, PAGE_SIZE);
207 if (start >= end)
208 continue;
209 if (ei->type == E820_RAM) {
210 if (end > end_pfn<<PAGE_SHIFT)
211 end_pfn = end>>PAGE_SHIFT;
212 } else {
213 if (end > end_pfn_map<<PAGE_SHIFT)
214 end_pfn_map = end>>PAGE_SHIFT;
215 }
216 }
217
218 if (end_pfn > end_pfn_map) 173 if (end_pfn > end_pfn_map)
219 end_pfn_map = end_pfn; 174 end_pfn_map = end_pfn;
220 if (end_pfn_map > MAXMEM>>PAGE_SHIFT) 175 if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
@@ -224,43 +179,10 @@ unsigned long __init e820_end_of_ram(void)
224 if (end_pfn > end_pfn_map) 179 if (end_pfn > end_pfn_map)
225 end_pfn = end_pfn_map; 180 end_pfn = end_pfn_map;
226 181
182 printk("end_pfn_map = %lu\n", end_pfn_map);
227 return end_pfn; 183 return end_pfn;
228} 184}
229 185
230/*
231 * Compute how much memory is missing in a range.
232 * Unlike the other functions in this file the arguments are in page numbers.
233 */
234unsigned long __init
235e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
236{
237 unsigned long ram = 0;
238 unsigned long start = start_pfn << PAGE_SHIFT;
239 unsigned long end = end_pfn << PAGE_SHIFT;
240 int i;
241 for (i = 0; i < e820.nr_map; i++) {
242 struct e820entry *ei = &e820.map[i];
243 unsigned long last, addr;
244
245 if (ei->type != E820_RAM ||
246 ei->addr+ei->size <= start ||
247 ei->addr >= end)
248 continue;
249
250 addr = round_up(ei->addr, PAGE_SIZE);
251 if (addr < start)
252 addr = start;
253
254 last = round_down(ei->addr + ei->size, PAGE_SIZE);
255 if (last >= end)
256 last = end;
257
258 if (last > addr)
259 ram += last - addr;
260 }
261 return ((end - start) - ram) >> PAGE_SHIFT;
262}
263
264/* 186/*
265 * Mark e820 reserved areas as busy for the resource manager. 187 * Mark e820 reserved areas as busy for the resource manager.
266 */ 188 */
@@ -342,6 +264,49 @@ void __init e820_mark_nosave_regions(void)
342 } 264 }
343} 265}
344 266
267/* Walk the e820 map and register active regions within a node */
268void __init
269e820_register_active_regions(int nid, unsigned long start_pfn,
270 unsigned long end_pfn)
271{
272 int i;
273 unsigned long ei_startpfn, ei_endpfn;
274 for (i = 0; i < e820.nr_map; i++) {
275 struct e820entry *ei = &e820.map[i];
276 ei_startpfn = round_up(ei->addr, PAGE_SIZE) >> PAGE_SHIFT;
277 ei_endpfn = round_down(ei->addr + ei->size, PAGE_SIZE)
278 >> PAGE_SHIFT;
279
280 /* Skip map entries smaller than a page */
281 if (ei_startpfn > ei_endpfn)
282 continue;
283
284 /* Check if end_pfn_map should be updated */
285 if (ei->type != E820_RAM && ei_endpfn > end_pfn_map)
286 end_pfn_map = ei_endpfn;
287
288 /* Skip if map is outside the node */
289 if (ei->type != E820_RAM ||
290 ei_endpfn <= start_pfn ||
291 ei_startpfn >= end_pfn)
292 continue;
293
294 /* Check for overlaps */
295 if (ei_startpfn < start_pfn)
296 ei_startpfn = start_pfn;
297 if (ei_endpfn > end_pfn)
298 ei_endpfn = end_pfn;
299
300 /* Obey end_user_pfn to save on memmap */
301 if (ei_startpfn >= end_user_pfn)
302 continue;
303 if (ei_endpfn > end_user_pfn)
304 ei_endpfn = end_user_pfn;
305
306 add_active_range(nid, ei_startpfn, ei_endpfn);
307 }
308}
309
345/* 310/*
346 * Add a memory region to the kernel e820 map. 311 * Add a memory region to the kernel e820 map.
347 */ 312 */
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index f98e48cae6da..0b00bb2ea576 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -292,7 +292,8 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
292 if (bootmap == -1L) 292 if (bootmap == -1L)
293 panic("Cannot find bootmem map of size %ld\n",bootmap_size); 293 panic("Cannot find bootmem map of size %ld\n",bootmap_size);
294 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn); 294 bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
295 e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT); 295 e820_register_active_regions(0, start_pfn, end_pfn);
296 free_bootmem_with_active_regions(0, end_pfn);
296 reserve_bootmem(bootmap, bootmap_size); 297 reserve_bootmem(bootmap, bootmap_size);
297} 298}
298#endif 299#endif
@@ -384,6 +385,7 @@ void __init setup_arch(char **cmdline_p)
384 385
385 finish_e820_parsing(); 386 finish_e820_parsing();
386 387
388 e820_register_active_regions(0, 0, -1UL);
387 /* 389 /*
388 * partially used pages are not usable - thus 390 * partially used pages are not usable - thus
389 * we are rounding upwards: 391 * we are rounding upwards:
@@ -414,6 +416,9 @@ void __init setup_arch(char **cmdline_p)
414 max_pfn = end_pfn; 416 max_pfn = end_pfn;
415 high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1; 417 high_memory = (void *)__va(end_pfn * PAGE_SIZE - 1) + 1;
416 418
419 /* Remove active ranges so rediscovery with NUMA-awareness happens */
420 remove_all_active_ranges();
421
417#ifdef CONFIG_ACPI_NUMA 422#ifdef CONFIG_ACPI_NUMA
418 /* 423 /*
419 * Parse SRAT to discover nodes. 424 * Parse SRAT to discover nodes.