Diffstat (limited to 'arch/x86/mm/init_64.c')

 -rw-r--r--  arch/x86/mm/init_64.c | 56
 1 file changed, 34 insertions(+), 22 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 129618ca0ea2..770536ebf7e9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -60,7 +60,7 @@ static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-int direct_gbpages __meminitdata
+int direct_gbpages
 #ifdef CONFIG_DIRECT_GBPAGES
         = 1
 #endif
@@ -88,7 +88,11 @@ early_param("gbpages", parse_direct_gbpages_on);
 
 int after_bootmem;
 
-static __init void *spp_getpage(void)
+/*
+ * NOTE: This function is marked __ref because it calls __init function
+ * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
+ */
+static __ref void *spp_getpage(void)
 {
         void *ptr;
 
@@ -221,7 +225,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
 void __init cleanup_highmap(void)
 {
         unsigned long vaddr = __START_KERNEL_map;
-        unsigned long end = round_up((unsigned long)_end, PMD_SIZE) - 1;
+        unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
         pmd_t *pmd = level2_kernel_pgt;
         pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
@@ -237,7 +241,7 @@ static unsigned long __initdata table_start;
 static unsigned long __meminitdata table_end;
 static unsigned long __meminitdata table_top;
 
-static __meminit void *alloc_low_page(unsigned long *phys)
+static __ref void *alloc_low_page(unsigned long *phys)
 {
         unsigned long pfn = table_end++;
         void *adr;
@@ -258,7 +262,7 @@ static __meminit void *alloc_low_page(unsigned long *phys)
         return adr;
 }
 
-static __meminit void unmap_low_page(void *adr)
+static __ref void unmap_low_page(void *adr)
 {
         if (after_bootmem)
                 return;
@@ -314,6 +318,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 {
         unsigned long pages = 0;
         unsigned long last_map_addr = end;
+        unsigned long start = address;
 
         int i = pmd_index(address);
 
@@ -331,16 +336,24 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                 }
 
                 if (pmd_val(*pmd)) {
-                        if (!pmd_large(*pmd))
+                        if (!pmd_large(*pmd)) {
+                                spin_lock(&init_mm.page_table_lock);
                                 last_map_addr = phys_pte_update(pmd, address,
                                                                 end);
+                                spin_unlock(&init_mm.page_table_lock);
+                        }
+                        /* Count entries we're using from level2_ident_pgt */
+                        if (start == 0)
+                                pages++;
                         continue;
                 }
 
                 if (page_size_mask & (1<<PG_LEVEL_2M)) {
                         pages++;
+                        spin_lock(&init_mm.page_table_lock);
                         set_pte((pte_t *)pmd,
                                 pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+                        spin_unlock(&init_mm.page_table_lock);
                         last_map_addr = (address & PMD_MASK) + PMD_SIZE;
                         continue;
                 }
@@ -349,7 +362,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                 last_map_addr = phys_pte_init(pte, address, end);
                 unmap_low_page(pte);
 
+                spin_lock(&init_mm.page_table_lock);
                 pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+                spin_unlock(&init_mm.page_table_lock);
         }
         update_page_count(PG_LEVEL_2M, pages);
         return last_map_addr;
@@ -362,9 +377,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
         pmd_t *pmd = pmd_offset(pud, 0);
         unsigned long last_map_addr;
 
-        spin_lock(&init_mm.page_table_lock);
         last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
-        spin_unlock(&init_mm.page_table_lock);
         __flush_tlb_all();
         return last_map_addr;
 }
@@ -400,20 +413,21 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
                 if (page_size_mask & (1<<PG_LEVEL_1G)) {
                         pages++;
+                        spin_lock(&init_mm.page_table_lock);
                         set_pte((pte_t *)pud,
                                 pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+                        spin_unlock(&init_mm.page_table_lock);
                         last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
                         continue;
                 }
 
                 pmd = alloc_low_page(&pmd_phys);
-
-                spin_lock(&init_mm.page_table_lock);
                 last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
                 unmap_low_page(pmd);
+
+                spin_lock(&init_mm.page_table_lock);
                 pud_populate(&init_mm, pud, __va(pmd_phys));
                 spin_unlock(&init_mm.page_table_lock);
-
         }
         __flush_tlb_all();
         update_page_count(PG_LEVEL_1G, pages);
@@ -437,14 +451,14 @@ static void __init find_early_table_space(unsigned long end)
         unsigned long puds, pmds, ptes, tables, start;
 
         puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-        tables = round_up(puds * sizeof(pud_t), PAGE_SIZE);
+        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
         if (direct_gbpages) {
                 unsigned long extra;
                 extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
                 pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
         } else
                 pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-        tables += round_up(pmds * sizeof(pmd_t), PAGE_SIZE);
+        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 
         if (cpu_has_pse) {
                 unsigned long extra;
@@ -452,7 +466,7 @@ static void __init find_early_table_space(unsigned long end)
                 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
         } else
                 ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-        tables += round_up(ptes * sizeof(pte_t), PAGE_SIZE);
+        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
         /*
          * RED-PEN putting page tables only on node 0 could
@@ -505,16 +519,14 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
                         continue;
                 }
 
-                if (after_bootmem)
-                        pud = pud_offset(pgd, start & PGDIR_MASK);
-                else
-                        pud = alloc_low_page(&pud_phys);
-
+                pud = alloc_low_page(&pud_phys);
                 last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
                                                  page_size_mask);
                 unmap_low_page(pud);
-                pgd_populate(&init_mm, pgd_offset_k(start),
-                             __va(pud_phys));
+
+                spin_lock(&init_mm.page_table_lock);
+                pgd_populate(&init_mm, pgd, __va(pud_phys));
+                spin_unlock(&init_mm.page_table_lock);
         }
 
         return last_map_addr;
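
The locking hunks above share one pattern: the expensive construction of a new, not-yet-visible page table (phys_pte_init/phys_pmd_init/phys_pud_init on a freshly allocated page) now runs without init_mm.page_table_lock, and the lock is taken only around the store that publishes the table (pmd_populate_kernel, pud_populate, pgd_populate) or rewrites a live entry (set_pte for a large page). The following user-space C sketch illustrates only that "build unlocked, publish under the lock" idea; it is not kernel code, and build_table, install_table, publish_slot and the pthread mutex are hypothetical stand-ins for the new page-table page, the phys_*_init() helpers, the pmd/pud/pgd slot and init_mm.page_table_lock.

/* Sketch: publish-under-lock pattern, names are illustrative only. */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER; /* ~ init_mm.page_table_lock */
static long *publish_slot;                                     /* ~ a pmd/pud/pgd entry */

static long *build_table(void)
{
        /* Expensive fill of memory nobody else can see yet: done with the
         * lock NOT held, like phys_pte_init() filling a new page. */
        long *t = calloc(512, sizeof(*t));
        if (!t)
                return NULL;
        for (int i = 0; i < 512; i++)
                t[i] = i;
        return t;
}

static void install_table(void)
{
        long *t = build_table();          /* unlocked: private memory */

        if (!t)
                return;
        pthread_mutex_lock(&table_lock);  /* lock only to publish, like pud_populate() */
        publish_slot = t;
        pthread_mutex_unlock(&table_lock);
}

int main(void)
{
        install_table();
        return publish_slot ? 0 : 1;
}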
