| field | value | date |
|---|---|---|
| author | Pekka Enberg <penberg@cs.helsinki.fi> | 2009-03-05 07:54:58 -0500 |
| committer | Ingo Molnar <mingo@elte.hu> | 2009-03-05 08:17:13 -0500 |
| commit | c464573cb3d3bdd45eed8f5f59596f84ede95a0c | |
| tree | cd7ae5662ebe5b8d61124c773f2070b0f508a6d0 | |
| parent | 96083ca11bc85265c7ef9e791a57e3514d8f605a | |
x86: rename after_init_bootmem to after_bootmem in mm/init_32.c
Impact: cleanup
This patch renames after_init_bootmem to after_bootmem in
mm/init_32.c to reduce the diff to the 64-bit version of
init_memory_mapping().
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Yinghai Lu <yinghai@kernel.org>
LKML-Reference: <1236257708-27269-7-git-send-email-penberg@cs.helsinki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/x86/mm/init_32.c | 16 |

1 file changed, 8 insertions, 8 deletions
```diff
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 559715b488bb..cc5c3992385e 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -63,7 +63,7 @@ static unsigned long __initdata table_start;
 static unsigned long __meminitdata table_end;
 static unsigned long __meminitdata table_top;
 
-static int __initdata after_init_bootmem;
+int after_bootmem;
 
 int direct_gbpages;
 
@@ -92,7 +92,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 
 #ifdef CONFIG_X86_PAE
 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
-		if (after_init_bootmem)
+		if (after_bootmem)
 			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 		else
 			pmd_table = (pmd_t *)alloc_low_page();
@@ -119,7 +119,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
 		pte_t *page_table = NULL;
 
-		if (after_init_bootmem) {
+		if (after_bootmem) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
 			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 #endif
@@ -158,7 +158,7 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
 		pte_t *newpte;
 		int i;
 
-		BUG_ON(after_init_bootmem);
+		BUG_ON(after_bootmem);
 		newpte = alloc_low_page();
 		for (i = 0; i < PTRS_PER_PTE; i++)
 			set_pte(newpte + i, pte[i]);
@@ -831,7 +831,7 @@ void __init setup_bootmem_allocator(void)
 	bootmap = setup_node_bootmem(0, 0, max_low_pfn, bootmap);
 #endif
 
-	after_init_bootmem = 1;
+	after_bootmem = 1;
 }
 
 static void __init find_early_table_space(unsigned long end, int use_pse,
@@ -1037,7 +1037,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * memory mapped. Unfortunately this is done currently before the
 	 * nodes are discovered.
 	 */
-	if (!after_init_bootmem)
+	if (!after_bootmem)
 		find_early_table_space(end, use_pse, use_gbpages);
 
 	for (i = 0; i < nr_range; i++)
@@ -1052,11 +1052,11 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
 	__flush_tlb_all();
 
-	if (!after_init_bootmem)
+	if (!after_bootmem)
 		reserve_early(table_start << PAGE_SHIFT,
 			table_end << PAGE_SHIFT, "PGTABLE");
 
-	if (!after_init_bootmem)
+	if (!after_bootmem)
 		early_memtest(start, end);
 
 	return end >> PAGE_SHIFT;
```
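The renamed flag gates which allocator the page-table setup code uses: before the bootmem allocator is initialized in setup_bootmem_allocator(), pages come from the early range reserved by find_early_table_space() (via alloc_low_page()); afterwards they come from bootmem (alloc_bootmem_low_pages()/alloc_bootmem_pages()). Below is a minimal userspace sketch of that pattern, not kernel code: the pool, the helper names (alloc_early_page, alloc_bootmem_page, alloc_pagetable_page), and main() are illustrative stand-ins.

```c
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Mirrors after_bootmem in mm/init_32.c: 0 until the "real"
 * allocator is up, 1 afterwards. */
static int after_bootmem;

/* Stand-in for the early range reserved by find_early_table_space(). */
static unsigned char early_pool[16 * PAGE_SIZE];
static size_t early_used;

static void *alloc_early_page(void)
{
	void *page;

	/* The early pool is fixed-size; exhausting it is a hard error
	 * in this sketch. */
	assert(early_used + PAGE_SIZE <= sizeof(early_pool));
	page = early_pool + early_used;
	early_used += PAGE_SIZE;
	return memset(page, 0, PAGE_SIZE);
}

/* Stand-in for allocating from the bootmem allocator once it exists. */
static void *alloc_bootmem_page(void)
{
	return calloc(1, PAGE_SIZE);
}

/* Same shape as one_md_table_init()/one_page_table_init() in the
 * patch: choose the allocator based on the boot phase. */
static void *alloc_pagetable_page(void)
{
	if (after_bootmem)
		return alloc_bootmem_page();
	return alloc_early_page();
}

int main(void)
{
	void *early = alloc_pagetable_page();	/* from the early pool */

	after_bootmem = 1;			/* like setup_bootmem_allocator() */
	void *late = alloc_pagetable_page();	/* from the later allocator */

	printf("early=%p late=%p\n", early, late);
	free(late);
	return 0;
}
```

Keeping one name, after_bootmem, for this flag on both 32-bit and 64-bit lets the two init_memory_mapping() implementations converge line for line, which is the point of the cleanup.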
