Diffstat (limited to 'arch/x86_64/mm/init.c')
 -rw-r--r--  arch/x86_64/mm/init.c | 121
 1 file changed, 65 insertions(+), 56 deletions(-)
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 4ba34e95d835..95bd232ff0cf 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/proc_fs.h>
 #include <linux/pci.h>
+#include <linux/poison.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/memory_hotplug.h>
@@ -41,8 +42,6 @@
 #include <asm/proto.h>
 #include <asm/smp.h>
 #include <asm/sections.h>
-#include <asm/dma-mapping.h>
-#include <asm/swiotlb.h>
 
 #ifndef Dprintk
 #define Dprintk(x...)
@@ -90,8 +89,6 @@ void show_mem(void)
 	printk(KERN_INFO "%lu pages swap cached\n",cached);
 }
 
-/* References to section boundaries */
-
 int after_bootmem;
 
 static __init void *spp_getpage(void)
@@ -261,9 +258,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
 	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
 		unsigned long entry;
 
-		if (address > end) {
-			for (; i < PTRS_PER_PMD; i++, pmd++)
-				set_pmd(pmd, __pmd(0));
+		if (address >= end) {
+			if (!after_bootmem)
+				for (; i < PTRS_PER_PMD; i++, pmd++)
+					set_pmd(pmd, __pmd(0));
 			break;
 		}
 		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
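A worked example of the boundary change in this hunk (illustration only, not part of the patch): the loop maps 2 MiB pages and is meant to cover [start, end).

/*
 * Assume end = 0x40000000 and PMD_SIZE = 0x200000 (2 MiB).  When address
 * reaches 0x40000000, the old test (address > end) is false, so one extra
 * 2 MiB page covering [end, end + PMD_SIZE) would still be mapped; the new
 * test (address >= end) breaks exactly at the boundary.  Zeroing the
 * remaining pmds is now also skipped once after_bootmem is set, presumably
 * so a runtime (hotplug) call does not clobber entries that are already live.
 */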
@@ -341,7 +339,8 @@ static void __init find_early_table_space(unsigned long end)
 	table_end = table_start;
 
 	early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
-		end, table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+		end, table_start << PAGE_SHIFT,
+		(table_start << PAGE_SHIFT) + tables);
 }
 
 /* Setup the direct mapping of the physical memory at PAGE_OFFSET.
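The early_printk change above only affects the reported range: table_end has just been reset to table_start, so the old message always printed an empty span. An illustration with invented values:

/*
 * With table_start = 0x8000 (a pfn) and tables = 0x3000 bytes of page-table
 * space:
 *   old: "... @ 8000000-8000000"   (table_end == table_start at this point)
 *   new: "... @ 8000000-8003000"   (the span the tables will actually occupy)
 */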
@@ -372,7 +371,7 @@ void __meminit init_memory_mapping(unsigned long start, unsigned long end)
 		pud_t *pud;
 
 		if (after_bootmem)
-			pud = pud_offset_k(pgd, start & PGDIR_MASK);
+			pud = pud_offset(pgd, start & PGDIR_MASK);
 		else
 			pud = alloc_low_page(&map, &pud_phys);
 
@@ -508,8 +507,6 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
 /*
  * Memory hotplug specific functions
  */
-#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
-
 void online_page(struct page *page)
 {
 	ClearPageReserved(page);
@@ -519,31 +516,17 @@ void online_page(struct page *page)
 	num_physpages++;
 }
 
-#ifndef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_MEMORY_HOTPLUG
 /*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
+ * XXX: memory_add_physaddr_to_nid() is to find node id from physical address
+ * via probe interface of sysfs. If acpi notifies hot-add event, then it
+ * can tell node id by searching dsdt. But, probe interface doesn't have
+ * node id. So, return 0 as node id at this time.
  */
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
 {
-	int err = -EIO;
-	unsigned long pfn;
-	unsigned long total = 0, mem = 0;
-	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
-		if (pfn_valid(pfn)) {
-			online_page(pfn_to_page(pfn));
-			err = 0;
-			mem++;
-		}
-		total++;
-	}
-	if (!err) {
-		z->spanned_pages += total;
-		z->present_pages += mem;
-		z->zone_pgdat->node_spanned_pages += total;
-		z->zone_pgdat->node_present_pages += mem;
-	}
-	return err;
+	return 0;
 }
 #endif
 
@@ -551,9 +534,9 @@ int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
  * Memory is added always to NORMAL zone. This means you will never get
  * additional DMA/DMA32 memory.
  */
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
 {
-	struct pglist_data *pgdat = NODE_DATA(0);
+	struct pglist_data *pgdat = NODE_DATA(nid);
 	struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -570,7 +553,7 @@ error:
 	printk("%s: Problem encountered in __add_pages!\n", __func__);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(add_memory);
+EXPORT_SYMBOL_GPL(arch_add_memory);
 
 int remove_memory(u64 start, u64 size)
 {
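For context, a minimal sketch of how the renamed hook and the new stub above are meant to fit together. The real caller lives in the generic memory-hotplug code, outside this file, so the function below is purely hypothetical:

/* Hypothetical caller (illustration only): resolve a node id for the new
 * range -- 0 on the sysfs probe path, per the XXX comment above -- and hand
 * it to the per-architecture hook. */
static int hotadd_range_sketch(u64 start, u64 size)
{
	int nid = memory_add_physaddr_to_nid(start);

	return arch_add_memory(nid, start, size);
}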
@@ -578,7 +561,33 @@ int remove_memory(u64 start, u64 size)
 }
 EXPORT_SYMBOL_GPL(remove_memory);
 
-#endif
+#else /* CONFIG_MEMORY_HOTPLUG */
+/*
+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+ * just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+	int err = -EIO;
+	unsigned long pfn;
+	unsigned long total = 0, mem = 0;
+	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+		if (pfn_valid(pfn)) {
+			online_page(pfn_to_page(pfn));
+			err = 0;
+			mem++;
+		}
+		total++;
+	}
+	if (!err) {
+		z->spanned_pages += total;
+		z->present_pages += mem;
+		z->zone_pgdat->node_spanned_pages += total;
+		z->zone_pgdat->node_present_pages += mem;
+	}
+	return err;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
 
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
 			 kcore_vsyscall;
@@ -587,10 +596,7 @@ void __init mem_init(void)
 {
 	long codesize, reservedpages, datasize, initsize;
 
-#ifdef CONFIG_SWIOTLB
-	pci_swiotlb_init();
-#endif
-	no_iommu_init();
+	pci_iommu_alloc();
 
 	/* How many end-of-memory variables you have, grandma! */
 	max_low_pfn = end_pfn;
@@ -644,20 +650,31 @@ void __init mem_init(void)
 #endif
 }
 
-void free_initmem(void)
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
 	unsigned long addr;
 
-	addr = (unsigned long)(&__init_begin);
-	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+	if (begin >= end)
+		return;
+
+	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
+		memset((void *)(addr & ~(PAGE_SIZE-1)),
+			POISON_FREE_INITMEM, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
-	memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
-	printk ("Freeing unused kernel memory: %luk freed\n", (__init_end - __init_begin) >> 10);
+}
+
+void free_initmem(void)
+{
+	memset(__initdata_begin, POISON_FREE_INITDATA,
+		__initdata_end - __initdata_begin);
+	free_init_pages("unused kernel memory",
+			(unsigned long)(&__init_begin),
+			(unsigned long)(&__init_end));
 }
 
 #ifdef CONFIG_DEBUG_RODATA
@@ -686,15 +703,7 @@ void mark_rodata_ro(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (start >= end)
-		return;
-	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-	}
+	free_init_pages("initrd memory", start, end);
 }
 #endif
 
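With the consolidation above, free_initmem() and free_initrd_mem() are both thin wrappers around free_init_pages(). A hypothetical additional caller would look the same; the first argument only feeds the printk, and the range is expected to be page-aligned kernel virtual addresses whose pages were marked reserved:

/* Hypothetical usage sketch (region_start/region_end are placeholders): */
free_init_pages("some other init region",
		(unsigned long)region_start,
		(unsigned long)region_end);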