Diffstat (limited to 'arch/x86_64/mm/init.c')
-rw-r--r--  arch/x86_64/mm/init.c  37
1 file changed, 34 insertions, 3 deletions
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index e5f7f1c34462..4ba34e95d835 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -305,7 +305,7 @@ static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned
 		if (paddr >= end)
 			break;
 
-		if (!after_bootmem && !e820_mapped(paddr, paddr+PUD_SIZE, 0)) {
+		if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
 			set_pud(pud, __pud(0));
 			continue;
 		}
@@ -507,9 +507,8 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
 
 /*
  * Memory hotplug specific functions
- * These are only for non-NUMA machines right now.
  */
-#ifdef CONFIG_MEMORY_HOTPLUG
+#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
 
 void online_page(struct page *page)
 {
@@ -520,6 +519,38 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
+#ifndef CONFIG_MEMORY_HOTPLUG
+/*
+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+ * just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+	int err = -EIO;
+	unsigned long pfn;
+	unsigned long total = 0, mem = 0;
+	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+		if (pfn_valid(pfn)) {
+			online_page(pfn_to_page(pfn));
+			err = 0;
+			mem++;
+		}
+		total++;
+	}
+	if (!err) {
+		z->spanned_pages += total;
+		z->present_pages += mem;
+		z->zone_pgdat->node_spanned_pages += total;
+		z->zone_pgdat->node_present_pages += mem;
+	}
+	return err;
+}
+#endif
+
+/*
+ * Memory is added always to NORMAL zone. This means you will never get
+ * additional DMA/DMA32 memory.
+ */
 int add_memory(u64 start, u64 size)
 {
 	struct pglist_data *pgdat = NODE_DATA(0);
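The add_memory() hunk is truncated above. As a rough, hypothetical sketch only (not the patch's actual body), such a hotadd entry point would convert the physical range to page frame numbers and hand it to __add_pages() against node 0's NORMAL zone, as the comment in the patch describes; the function name example_add_memory and the includes below are illustrative assumptions:

/*
 * Hypothetical illustration only -- not taken from the patch above.
 * Feeds a physical address range into __add_pages() for node 0's
 * NORMAL zone, matching the "always NORMAL zone" comment in the patch.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>

static int example_add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(0);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;	/* never DMA/DMA32 */
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	/* online the pre-allocated mem_map pages for this range */
	return __add_pages(zone, start_pfn, nr_pages);
}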