about summary refs log tree commit diff stats
path: root/arch/x86/mm/init_32.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/mm/init_32.c')
-rw-r--r--arch/x86/mm/init_32.c69
1 file changed, 29 insertions, 40 deletions
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 9ec62da85fd7..de236e419cb5 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -71,7 +71,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
71 if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { 71 if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
72 pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE); 72 pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
73 73
74 paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); 74 paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
75 set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); 75 set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
76 pud = pud_offset(pgd, 0); 76 pud = pud_offset(pgd, 0);
77 BUG_ON(pmd_table != pmd_offset(pud, 0)); 77 BUG_ON(pmd_table != pmd_offset(pud, 0));
@@ -100,7 +100,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
100 (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE); 100 (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
101 } 101 }
102 102
103 paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT); 103 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
104 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); 104 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
105 BUG_ON(page_table != pte_offset_kernel(pmd, 0)); 105 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
106 } 106 }
@@ -227,6 +227,25 @@ static inline int page_kills_ppro(unsigned long pagenr)
227 return 0; 227 return 0;
228} 228}
229 229
230/*
231 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
232 * is valid. The argument is a physical page number.
233 *
234 *
235 * On x86, access has to be given to the first megabyte of ram because that area
236 * contains bios code and data regions used by X and dosemu and similar apps.
237 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
238 * mmio resources as well as potential bios/acpi data regions.
239 */
240int devmem_is_allowed(unsigned long pagenr)
241{
242 if (pagenr <= 256)
243 return 1;
244 if (!page_is_ram(pagenr))
245 return 1;
246 return 0;
247}
248
230#ifdef CONFIG_HIGHMEM 249#ifdef CONFIG_HIGHMEM
231pte_t *kmap_pte; 250pte_t *kmap_pte;
232pgprot_t kmap_prot; 251pgprot_t kmap_prot;
@@ -268,47 +287,17 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
268 pkmap_page_table = pte; 287 pkmap_page_table = pte;
269} 288}
270 289
271static void __meminit free_new_highpage(struct page *page)
272{
273 init_page_count(page);
274 __free_page(page);
275 totalhigh_pages++;
276}
277
278void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro) 290void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
279{ 291{
280 if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) { 292 if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
281 ClearPageReserved(page); 293 ClearPageReserved(page);
282 free_new_highpage(page); 294 init_page_count(page);
295 __free_page(page);
296 totalhigh_pages++;
283 } else 297 } else
284 SetPageReserved(page); 298 SetPageReserved(page);
285} 299}
286 300
287static int __meminit
288add_one_highpage_hotplug(struct page *page, unsigned long pfn)
289{
290 free_new_highpage(page);
291 totalram_pages++;
292#ifdef CONFIG_FLATMEM
293 max_mapnr = max(pfn, max_mapnr);
294#endif
295 num_physpages++;
296
297 return 0;
298}
299
300/*
301 * Not currently handling the NUMA case.
302 * Assuming single node and all memory that
303 * has been added dynamically that would be
304 * onlined here is in HIGHMEM.
305 */
306void __meminit online_page(struct page *page)
307{
308 ClearPageReserved(page);
309 add_one_highpage_hotplug(page, page_to_pfn(page));
310}
311
312#ifndef CONFIG_NUMA 301#ifndef CONFIG_NUMA
313static void __init set_highmem_pages_init(int bad_ppro) 302static void __init set_highmem_pages_init(int bad_ppro)
314{ 303{
@@ -365,7 +354,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
365 354
366 pte_clear(NULL, va, pte); 355 pte_clear(NULL, va, pte);
367 } 356 }
368 paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT); 357 paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
369} 358}
370 359
371void __init native_pagetable_setup_done(pgd_t *base) 360void __init native_pagetable_setup_done(pgd_t *base)
@@ -457,7 +446,7 @@ void zap_low_mappings(void)
457 * Note that "pgd_clear()" doesn't do it for 446 * Note that "pgd_clear()" doesn't do it for
458 * us, because pgd_clear() is a no-op on i386. 447 * us, because pgd_clear() is a no-op on i386.
459 */ 448 */
460 for (i = 0; i < USER_PTRS_PER_PGD; i++) { 449 for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
461#ifdef CONFIG_X86_PAE 450#ifdef CONFIG_X86_PAE
462 set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page))); 451 set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
463#else 452#else
@@ -547,9 +536,9 @@ void __init paging_init(void)
547 536
548/* 537/*
549 * Test if the WP bit works in supervisor mode. It isn't supported on 386's 538 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
550 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This 539 * and also on some strange 486's. All 586+'s are OK. This used to involve
551 * used to involve black magic jumps to work around some nasty CPU bugs, 540 * black magic jumps to work around some nasty CPU bugs, but fortunately the
552 * but fortunately the switch to using exceptions got rid of all that. 541 * switch to using exceptions got rid of all that.
553 */ 542 */
554static void __init test_wp_bit(void) 543static void __init test_wp_bit(void)
555{ 544{