path: root/arch/x86/mm/init_32.c
author	Yinghai Lu <yinghai@kernel.org>	2012-11-16 22:39:16 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2012-11-17 14:59:44 -0500
commit	4e37a890474b89ca49ad6b3651b1709a17d7c216 (patch)
tree	865d65b582af514a6e866071a30e06a095dd8bc1 /arch/x86/mm/init_32.c
parent	2e8059edb6fc5887e8e022d9e04fba26c9e0abcb (diff)
x86, mm: Unifying after_bootmem for 32bit and 64bit
after_bootmem has a different meaning on 32-bit and 64-bit:

	32-bit: set once bootmem is ready
	64-bit: set once bootmem has been destroyed

Let's merge them and make 32-bit behave the same as 64-bit.

On 32-bit, the code mixes alloc_bootmem_pages() and alloc_low_page() depending on whether after_bootmem is set. alloc_bootmem() is just a wrapper around memblock on x86, and alloc_low_page() is backed by memblock as well, so we can drop the bootmem path and use alloc_low_page() only.

At the same time, make alloc_low_page() handle the real after_bootmem case for 32-bit, because alloc_bootmem_pages() could fall back to slab too.

Finally, move the point where after_bootmem is set on 32-bit to match 64-bit.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-40-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
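For readers following only this file: the shared allocator that the callers below now rely on lives in arch/x86/mm/init.c and is not part of this diff. The snippet below is only a simplified sketch of the idea, not the actual kernel code; the name alloc_low_page_sketch() is ours, the real helper also manages the brk-based pgt_buf and multi-page requests, and the use of min_pfn_mapped/max_pfn_mapped here is illustrative. It shows how one helper can cover both boot phases, which is what lets the after_bootmem branches in the hunks below be deleted.

	/*
	 * Simplified sketch only -- not the actual arch/x86/mm/init.c code.
	 * One helper serves both boot phases, so callers such as
	 * one_page_table_init() no longer need an after_bootmem branch.
	 */
	static __ref void *alloc_low_page_sketch(void)
	{
		phys_addr_t addr;

		if (after_bootmem)
			/* bootmem/memblock data is gone; use the page allocator. */
			return (void *)__get_free_page(GFP_ATOMIC | __GFP_ZERO);

		/* Early boot: carve a zeroed page out of already-mapped low RAM. */
		addr = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
					      max_pfn_mapped << PAGE_SHIFT,
					      PAGE_SIZE, PAGE_SIZE);
		if (!addr)
			panic("alloc_low_page: out of memory");
		memblock_reserve(addr, PAGE_SIZE);

		return memset(__va(addr), 0, PAGE_SIZE);
	}

With an allocator along these lines in place, the hunks below reduce to removing the after_bootmem special cases and moving the point where the flag is set.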
Diffstat (limited to 'arch/x86/mm/init_32.c')
-rw-r--r--	arch/x86/mm/init_32.c	21
1 file changed, 4 insertions, 17 deletions
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 19ef9f018012..f4fc4a28393a 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -73,10 +73,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 
 #ifdef CONFIG_X86_PAE
 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
-		if (after_bootmem)
-			pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
-		else
-			pmd_table = (pmd_t *)alloc_low_page();
+		pmd_table = (pmd_t *)alloc_low_page();
 		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
 		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 		pud = pud_offset(pgd, 0);
@@ -98,17 +95,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 static pte_t * __init one_page_table_init(pmd_t *pmd)
 {
 	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
-		pte_t *page_table = NULL;
-
-		if (after_bootmem) {
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
-			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
-#endif
-			if (!page_table)
-				page_table =
-					(pte_t *)alloc_bootmem_pages(PAGE_SIZE);
-		} else
-			page_table = (pte_t *)alloc_low_page();
+		pte_t *page_table = (pte_t *)alloc_low_page();
 
 		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
@@ -708,8 +695,6 @@ void __init setup_bootmem_allocator(void)
 	printk(KERN_INFO " mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
 	printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
-
-	after_bootmem = 1;
 }
 
 /*
@@ -795,6 +780,8 @@ void __init mem_init(void)
 		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
 			reservedpages++;
 
+	after_bootmem = 1;
+
 	codesize = (unsigned long) &_etext - (unsigned long) &_text;
 	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
 	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;