Diffstat (limited to 'arch/x86/mm/init_32.c')
-rw-r--r-- | arch/x86/mm/init_32.c | 61
1 file changed, 4 insertions, 57 deletions
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ef0bb941cdf5..47df0e1bbeb9 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -50,8 +50,6 @@
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
 
-unsigned int __VMALLOC_RESERVE = 128 << 20;
-
 unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
 
@@ -486,22 +484,10 @@ void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
 	work_with_active_regions(nid, add_highpages_work_fn, &data);
 }
 
-#ifndef CONFIG_NUMA
-static void __init set_highmem_pages_init(void)
-{
-	add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
-
-	totalram_pages += totalhigh_pages;
-}
-#endif /* !CONFIG_NUMA */
-
 #else
 static inline void permanent_kmaps_init(pgd_t *pgd_base)
 {
 }
-static inline void set_highmem_pages_init(void)
-{
-}
 #endif /* CONFIG_HIGHMEM */
 
 void __init native_pagetable_setup_start(pgd_t *base)
@@ -864,10 +850,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
 	unsigned long puds, pmds, ptes, tables, start;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-	tables = PAGE_ALIGN(puds * sizeof(pud_t));
+	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
 
 	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-	tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
+	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 
 	if (use_pse) {
 		unsigned long extra;
@@ -878,10 +864,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
 	} else
 		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	tables += PAGE_ALIGN(ptes * sizeof(pte_t));
+	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
 	/* for fixmap */
-	tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));
+	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
 
 	/*
 	 * RED-PEN putting page tables only on node 0 could
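
A note on the PAGE_ALIGN() -> roundup(..., PAGE_SIZE) substitution in the two hunks above: assuming the usual kernel definitions (PAGE_ALIGN() adds PAGE_SIZE - 1 and masks with PAGE_MASK, roundup() divides by the boundary and multiplies back), both forms round a table size up to the next page boundary, so the change reads as a stylistic cleanup rather than a behavioural one. A minimal user-space sketch of that equivalence, with PAGE_SIZE and both macros redefined locally so it builds on its own:

/*
 * Stand-alone sketch, not kernel code: PAGE_SIZE, PAGE_MASK, PAGE_ALIGN()
 * and roundup() are redefined here with their conventional expansions so
 * the comparison can be compiled and run in user space.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define roundup(x, y)	((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	unsigned long n;

	/* Both macros agree for every size up to a few pages. */
	for (n = 0; n <= 4 * PAGE_SIZE; n++)
		assert(PAGE_ALIGN(n) == roundup(n, PAGE_SIZE));

	printf("PAGE_ALIGN(n) == roundup(n, PAGE_SIZE) for all tested n\n");
	return 0;
}

The roundup() spelling makes the alignment granularity explicit at each call site, which is the apparent motivation for preferring it over the page-specific macro here.
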
@@ -1231,45 +1217,6 @@ void mark_rodata_ro(void)
 }
 #endif
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	/*
-	 * If debugging page accesses then do not free this memory but
-	 * mark them not present - any buggy init-section access will
-	 * create a kernel page fault:
-	 */
-	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-		begin, PAGE_ALIGN(end));
-	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
-#else
-	unsigned long addr;
-
-	/*
-	 * We just marked the kernel text read only above, now that
-	 * we are going to free part of that, we need to make that
-	 * writeable first.
-	 */
-	set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
-
-	for (addr = begin; addr < end; addr += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-		free_page(addr);
-		totalram_pages++;
-	}
-	printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-#endif
-}
-
-void free_initmem(void)
-{
-	free_init_pages("unused kernel memory",
-		(unsigned long)(&__init_begin),
-		(unsigned long)(&__init_end));
-}
-
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {