author     Ingo Molnar <mingo@elte.hu>    2008-09-22 07:08:57 -0400
committer  Ingo Molnar <mingo@elte.hu>    2008-09-22 07:08:57 -0400
commit     0b88641f1bafdbd087d5e63987a30cc0eadd63b9
tree       81dcf756db373444140bb2623584710c628e3048  /arch/x86/mm/init_64.c
parent     fbdbf709938d155c719c76b9894d28342632c797
parent     72d31053f62c4bc464c2783974926969614a8649
Merge commit 'v2.6.27-rc7' into x86/debug
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--  arch/x86/mm/init_64.c | 85
1 file changed, 30 insertions(+), 55 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index ec37121f6709..d3746efb060d 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -60,7 +60,7 @@ static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-int direct_gbpages __meminitdata
+int direct_gbpages
 #ifdef CONFIG_DIRECT_GBPAGES
 				= 1
 #endif
@@ -86,46 +86,13 @@ early_param("gbpages", parse_direct_gbpages_on);
  * around without checking the pgd every time.
  */
 
-void show_mem(void)
-{
-	long i, total = 0, reserved = 0;
-	long shared = 0, cached = 0;
-	struct page *page;
-	pg_data_t *pgdat;
-
-	printk(KERN_INFO "Mem-info:\n");
-	show_free_areas();
-	for_each_online_pgdat(pgdat) {
-		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-			/*
-			 * This loop can take a while with 256 GB and
-			 * 4k pages so defer the NMI watchdog:
-			 */
-			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
-				touch_nmi_watchdog();
-
-			if (!pfn_valid(pgdat->node_start_pfn + i))
-				continue;
-
-			page = pfn_to_page(pgdat->node_start_pfn + i);
-			total++;
-			if (PageReserved(page))
-				reserved++;
-			else if (PageSwapCache(page))
-				cached++;
-			else if (page_count(page))
-				shared += page_count(page) - 1;
-		}
-	}
-	printk(KERN_INFO "%lu pages of RAM\n", total);
-	printk(KERN_INFO "%lu reserved pages\n", reserved);
-	printk(KERN_INFO "%lu pages shared\n", shared);
-	printk(KERN_INFO "%lu pages swap cached\n", cached);
-}
-
 int after_bootmem;
 
-static __init void *spp_getpage(void)
+/*
+ * NOTE: This function is marked __ref because it calls __init function
+ * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
+ */
+static __ref void *spp_getpage(void)
 {
 	void *ptr;
 
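
The NOTE in this hunk explains the __ref annotation: spp_getpage() stays reachable after boot but calls the __init-only bootmem allocator, and the after_bootmem flag is what keeps that safe. For orientation, a sketch of how the whole function reads with this change applied (body reconstructed from this era of the tree, so treat it as an approximation rather than a verbatim quote):

	static __ref void *spp_getpage(void)
	{
		void *ptr;

		if (after_bootmem)
			ptr = (void *) get_zeroed_page(GFP_ATOMIC);
		else
			ptr = alloc_bootmem_pages(PAGE_SIZE);	/* __init only */

		/* both allocators return page-aligned memory; anything
		 * else means the allocation failed outright */
		if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
			panic("set_pte_phys: cannot allocate page data %s\n",
			      after_bootmem ? "after bootmem" : "");

		return ptr;
	}

__ref only silences the modpost section-mismatch warning; the runtime guard is the after_bootmem check itself.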
@@ -274,7 +241,7 @@ static unsigned long __initdata table_start;
 static unsigned long __meminitdata table_end;
 static unsigned long __meminitdata table_top;
 
-static __meminit void *alloc_low_page(unsigned long *phys)
+static __ref void *alloc_low_page(unsigned long *phys)
 {
 	unsigned long pfn = table_end++;
 	void *adr;
@@ -295,7 +262,7 @@ static __meminit void *alloc_low_page(unsigned long *phys)
 	return adr;
 }
 
-static __meminit void unmap_low_page(void *adr)
+static __ref void unmap_low_page(void *adr)
 {
 	if (after_bootmem)
 		return;
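
alloc_low_page() and unmap_low_page() get the same __ref treatment for the same reason: before bootmem is available they hand out pages through a temporary early mapping that must be undone after use, while after boot they fall back to the normal page allocator. A hedged sketch of the pairing (reconstructed, not verbatim):

	static __ref void *alloc_low_page(unsigned long *phys)
	{
		unsigned long pfn = table_end++;
		void *adr;

		if (after_bootmem) {
			adr = (void *)get_zeroed_page(GFP_ATOMIC);
			*phys = __pa(adr);
			return adr;
		}

		if (pfn >= table_top)
			panic("alloc_low_page: ran out of memory");

		/* temporary early mapping; the caller must unmap it */
		adr = early_ioremap(pfn * PAGE_SIZE, PAGE_SIZE);
		memset(adr, 0, PAGE_SIZE);
		*phys = pfn * PAGE_SIZE;
		return adr;
	}

	static __ref void unmap_low_page(void *adr)
	{
		if (after_bootmem)
			return;	/* nothing to undo for get_zeroed_page() */

		early_iounmap(adr, PAGE_SIZE);
	}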
@@ -351,6 +318,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 {
 	unsigned long pages = 0;
 	unsigned long last_map_addr = end;
+	unsigned long start = address;
 
 	int i = pmd_index(address);
 
@@ -368,16 +336,24 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		}
 
 		if (pmd_val(*pmd)) {
-			if (!pmd_large(*pmd))
+			if (!pmd_large(*pmd)) {
+				spin_lock(&init_mm.page_table_lock);
 				last_map_addr = phys_pte_update(pmd, address,
 								end);
+				spin_unlock(&init_mm.page_table_lock);
+			}
+			/* Count entries we're using from level2_ident_pgt */
+			if (start == 0)
+				pages++;
 			continue;
 		}
 
 		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 			pages++;
+			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pmd,
 				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
 			continue;
 		}
@@ -386,7 +362,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		last_map_addr = phys_pte_init(pte, address, end);
 		unmap_low_page(pte);
 
+		spin_lock(&init_mm.page_table_lock);
 		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+		spin_unlock(&init_mm.page_table_lock);
 	}
 	update_page_count(PG_LEVEL_2M, pages);
 	return last_map_addr;
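
The locking rework in the two hunks above (and in the phys_pmd_update() and phys_pud_init() hunks that follow) all implement one pattern: allocate and fill a new page-table page outside init_mm.page_table_lock, and take the lock only around the store that publishes it or rewrites an existing entry. Condensed from the diff itself:

	/* fill the new pte page unlocked: the allocation and the
	 * memset/copy work stay out of the critical section */
	pte = alloc_low_page(&pte_phys);
	last_map_addr = phys_pte_init(pte, address, end);
	unmap_low_page(pte);

	/* publish it with the shortest possible critical section */
	spin_lock(&init_mm.page_table_lock);
	pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
	spin_unlock(&init_mm.page_table_lock);

Before this change, the caller (phys_pmd_update()) held the lock across the entire phys_pmd_init() walk, allocations included; afterwards the critical sections shrink to the individual page-table stores.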
@@ -399,9 +377,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
 	pmd_t *pmd = pmd_offset(pud, 0);
 	unsigned long last_map_addr;
 
-	spin_lock(&init_mm.page_table_lock);
 	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
-	spin_unlock(&init_mm.page_table_lock);
 	__flush_tlb_all();
 	return last_map_addr;
 }
@@ -437,20 +413,21 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
 		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
+			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pud,
 				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
 			continue;
 		}
 
 		pmd = alloc_low_page(&pmd_phys);
-
-		spin_lock(&init_mm.page_table_lock);
 		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
 		unmap_low_page(pmd);
+
+		spin_lock(&init_mm.page_table_lock);
 		pud_populate(&init_mm, pud, __va(pmd_phys));
 		spin_unlock(&init_mm.page_table_lock);
-
 	}
 	__flush_tlb_all();
 	update_page_count(PG_LEVEL_1G, pages);
@@ -542,16 +519,14 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
 			continue;
 		}
 
-		if (after_bootmem)
-			pud = pud_offset(pgd, start & PGDIR_MASK);
-		else
-			pud = alloc_low_page(&pud_phys);
-
+		pud = alloc_low_page(&pud_phys);
 		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
 						 page_size_mask);
 		unmap_low_page(pud);
-		pgd_populate(&init_mm, pgd_offset_k(start),
-			     __va(pud_phys));
+
+		spin_lock(&init_mm.page_table_lock);
+		pgd_populate(&init_mm, pgd, __va(pud_phys));
+		spin_unlock(&init_mm.page_table_lock);
 	}
 
 	return last_map_addr;
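
Reading the hunks together, the end state is consistent: kernel_physical_mapping_init() no longer special-cases after_bootmem when choosing a pud page, since alloc_low_page() does that branching internally; every level fills its new table page outside init_mm.page_table_lock and takes the lock only for the final pgd_populate()/pud_populate()/pmd_populate_kernel()/set_pte() store; and pgd_populate() now reuses the pgd the loop already holds instead of recomputing pgd_offset_k(start), which is equivalent here because the loop iterates over pgd entries.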