Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/kernel/e820.c |  2
-rw-r--r--  arch/x86_64/mm/init.c     | 36
2 files changed, 35 insertions(+), 3 deletions(-)
diff --git a/arch/x86_64/kernel/e820.c b/arch/x86_64/kernel/e820.c
index 293cd71a266a..db57d3ff04e3 100644
--- a/arch/x86_64/kernel/e820.c
+++ b/arch/x86_64/kernel/e820.c
@@ -80,7 +80,7 @@ static inline int bad_addr(unsigned long *addrp, unsigned long size)
 	return 0;
 }
 
-int __init e820_mapped(unsigned long start, unsigned long end, unsigned type)
+int __meminit e820_mapped(unsigned long start, unsigned long end, unsigned type)
 {
 	int i;
 	for (i = 0; i < e820.nr_map; i++) {
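The e820.c hunk changes the section annotation because __add_pages() in the mm/init.c hunk below calls e820_mapped() at hot-add time, long after boot, so the function can no longer be discarded with .init.text. As a rough sketch of what the annotation does (paraphrased from include/linux/init.h of this kernel era; the exact definition is an assumption, not part of this patch):

/* Sketch only, not part of this patch: effect of the __meminit annotation. */
#ifdef CONFIG_MEMORY_HOTPLUG
#define __meminit			/* hotplug enabled: keep the code resident after boot */
#else
#define __meminit	__init		/* no hotplug: discard it along with .init.text */
#endif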
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index e5f7f1c34462..492161168402 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -507,9 +507,8 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
 
 /*
  * Memory hotplug specific functions
- * These are only for non-NUMA machines right now.
  */
-#ifdef CONFIG_MEMORY_HOTPLUG
+#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
 
 void online_page(struct page *page)
 {
@@ -520,6 +519,39 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
+#ifndef CONFIG_MEMORY_HOTPLUG
+/*
+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+ * just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+	int err = -EIO;
+	unsigned long pfn;
+	unsigned long total = 0, mem = 0;
+	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+		unsigned long addr = pfn << PAGE_SHIFT;
+		if (pfn_valid(pfn) && e820_mapped(addr, addr+1, E820_RAM)) {
+			online_page(pfn_to_page(pfn));
+			err = 0;
+			mem++;
+		}
+		total++;
+	}
+	if (!err) {
+		z->spanned_pages += total;
+		z->present_pages += mem;
+		z->zone_pgdat->node_spanned_pages += total;
+		z->zone_pgdat->node_present_pages += mem;
+	}
+	return err;
+}
+#endif
+
+/*
+ * Memory is added always to NORMAL zone. This means you will never get
+ * additional DMA/DMA32 memory.
+ */
 int add_memory(u64 start, u64 size)
 {
 	struct pglist_data *pgdat = NODE_DATA(0);
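For orientation, a minimal, hypothetical caller sketch follows; it is not part of the patch, and only add_memory() comes from the file being modified (its declaration is assumed to be visible through linux/memory_hotplug.h in kernels of this era). It only illustrates how the hot-add path introduced above would be reached.

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/memory_hotplug.h>

/* Hypothetical example, not part of the patch. */
static int example_hotadd_range(u64 start, u64 size)
{
	/*
	 * add_memory() places the range in the NORMAL zone (per the comment
	 * added above); without sparsemem, __add_pages() walks the
	 * pre-allocated mem_map and onlines every pfn that e820 marks as RAM.
	 */
	int err = add_memory(start, size);

	if (err)
		printk(KERN_ERR "memory hot-add of 0x%llx-0x%llx failed: %d\n",
		       (unsigned long long)start,
		       (unsigned long long)(start + size), err);
	return err;
}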