path: root/arch/x86_64/mm
author	Andi Kleen <ak@suse.de>	2006-04-07 13:49:15 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-04-09 14:53:16 -0400
commit	9d99aaa31f5994d1923c3713ce9144c4c42332e1 (patch)
tree	ae608593ca196dd6493cccbdfc1b8dd098e91ee8 /arch/x86_64/mm
parent	805e8c03c9ea9bdb402a36341e02ec24825d5417 (diff)
[PATCH] x86_64: Support memory hotadd without sparsemem
Memory hotadd doesn't need SPARSEMEM, but can be handled by just preallocating
mem_maps. This only needs some untangling of ifdefs to enable the necessary
code even without SPARSEMEM.

Originally from Keith Mannthey, hacked by AK.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
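The reason preallocating mem_maps is enough is that, without SPARSEMEM, pfn_to_page() is a plain index into one flat mem_map array, so a hot-added pfn already has a struct page as long as mem_map was sized over the range up front. The user-space sketch below illustrates only that flat-model indexing; struct page, mem_map, pfn_offset and max_pfn here are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdlib.h>

struct page { int online; };		/* stand-in for the kernel's struct page */

static struct page *mem_map;		/* one flat array, allocated up front */
static unsigned long pfn_offset;	/* first pfn covered by mem_map */

/* flat-model pfn_to_page(): a constant-time array index, no section lookup */
static struct page *pfn_to_page(unsigned long pfn)
{
	return &mem_map[pfn - pfn_offset];
}

int main(void)
{
	unsigned long max_pfn = 1UL << 20;	/* cover the largest possible pfn,
						   including ranges that may only
						   be hot-added later */
	pfn_offset = 0;
	mem_map = calloc(max_pfn, sizeof(*mem_map));
	if (!mem_map)
		return 1;

	/* "hot-add": the struct page already exists, so just flip its state */
	pfn_to_page(0x80000)->online = 1;
	printf("pfn 0x80000 online=%d\n", pfn_to_page(0x80000)->online);

	free(mem_map);
	return 0;
}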
Diffstat (limited to 'arch/x86_64/mm')
-rw-r--r--	arch/x86_64/mm/init.c	36
1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index e5f7f1c34462..492161168402 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -507,9 +507,8 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
 
 /*
  * Memory hotplug specific functions
- * These are only for non-NUMA machines right now.
  */
-#ifdef CONFIG_MEMORY_HOTPLUG
+#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
 
 void online_page(struct page *page)
 {
@@ -520,6 +519,39 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
+#ifndef CONFIG_MEMORY_HOTPLUG
+/*
+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+ * just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+	int err = -EIO;
+	unsigned long pfn;
+	unsigned long total = 0, mem = 0;
+	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+		unsigned long addr = pfn << PAGE_SHIFT;
+		if (pfn_valid(pfn) && e820_mapped(addr, addr+1, E820_RAM)) {
+			online_page(pfn_to_page(pfn));
+			err = 0;
+			mem++;
+		}
+		total++;
+	}
+	if (!err) {
+		z->spanned_pages += total;
+		z->present_pages += mem;
+		z->zone_pgdat->node_spanned_pages += total;
+		z->zone_pgdat->node_present_pages += mem;
+	}
+	return err;
+}
+#endif
+
+/*
+ * Memory is added always to NORMAL zone. This means you will never get
+ * additional DMA/DMA32 memory.
+ */
 int add_memory(u64 start, u64 size)
 {
 	struct pglist_data *pgdat = NODE_DATA(0);
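For illustration, here is a user-space model of the accounting the new __add_pages() performs, driven roughly the way add_memory() would drive it (a physical start/size converted to pfns). struct zone, pfn_valid() and the e820 stub below are simplified stand-ins, not the kernel's code; the output only shows how spanned_pages counts the whole hot-added window while present_pages counts the RAM-backed pages.

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT 12
#define EIO 5

struct zone {
	unsigned long spanned_pages;	/* whole hot-added window, holes included */
	unsigned long present_pages;	/* only pages backed by RAM */
};

static bool pfn_valid(unsigned long pfn)
{
	(void)pfn;
	return true;			/* mem_map is preallocated for the range */
}

/* stand-in e820 lookup: everything past hole_start is a hole, not RAM */
static bool e820_mapped_ram(unsigned long addr, unsigned long hole_start)
{
	return addr < hole_start;
}

static int add_pages(struct zone *z, unsigned long start_pfn,
		     unsigned long nr_pages, unsigned long hole_start)
{
	int err = -EIO;			/* fails unless at least one page is RAM */
	unsigned long pfn, total = 0, mem = 0;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		unsigned long addr = pfn << PAGE_SHIFT;
		if (pfn_valid(pfn) && e820_mapped_ram(addr, hole_start)) {
			/* the kernel calls online_page(pfn_to_page(pfn)) here */
			err = 0;
			mem++;
		}
		total++;
	}
	if (!err) {
		z->spanned_pages += total;
		z->present_pages += mem;
	}
	return err;
}

int main(void)
{
	unsigned long start = 0x10000000UL;	/* hot-add 64 pages at 256MB */
	unsigned long size = 64UL << PAGE_SHIFT;
	struct zone normal = { 0, 0 };
	int err = add_pages(&normal, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
			    start + size / 2);	/* second half of the window is a hole */

	printf("err=%d spanned=%lu present=%lu\n",
	       err, normal.spanned_pages, normal.present_pages);
	return 0;
}

Because err starts at -EIO and is only cleared once a page is onlined, hot-adding a range that turns out to be entirely a hole fails without touching the zone counters; that mirrors the behaviour of the patch above.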