aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm
diff options
context:
space:
mode:
authorJan Beulich <JBeulich@suse.com>2012-05-16 09:06:26 -0400
committerIngo Molnar <mingo@kernel.org>2012-05-18 04:13:37 -0400
commit20167d3421a089a1bf1bd680b150dc69c9506810 (patch)
tree3338cb5455b31473630679061fcd96addc8ac7e2 /arch/x86/mm
parent3e7f3db001de6133db1c385c92eec944409a8b4f (diff)
x86-64: Fix accounting in kernel_physical_mapping_init()
When finding a present and acceptable 2M/1G mapping, the number of
pages mapped this way shouldn't be incremented (as it was already
incremented when the earlier part of the mapping was established).
Instead, last_map_addr needs to be updated in this case.

Further, address increments were wrong in one place each in both
phys_pmd_init() and phys_pud_init() (lacking the aligning down to the
respective page boundary).

As we're now doing the same calculation several times, fold it into a
single instance using a local variable (matching how
kernel_physical_mapping_init() itself does it at the PGD level).

Observed during code inspection, not because of an actual problem.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/4FB3C27202000078000841A0@nat28.tlf.novell.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--arch/x86/mm/init_64.c23
1 file changed, 13 insertions, 10 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 436a0309db33..f9476a0f8cb6 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -408,12 +408,12 @@ static unsigned long __meminit
 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask, pgprot_t prot)
 {
-	unsigned long pages = 0;
+	unsigned long pages = 0, next;
 	unsigned long last_map_addr = end;
 
 	int i = pmd_index(address);
 
-	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
+	for (; i < PTRS_PER_PMD; i++, address = next) {
 		unsigned long pte_phys;
 		pmd_t *pmd = pmd_page + pmd_index(address);
 		pte_t *pte;
@@ -427,6 +427,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			break;
 		}
 
+		next = (address & PMD_MASK) + PMD_SIZE;
+
 		if (pmd_val(*pmd)) {
 			if (!pmd_large(*pmd)) {
 				spin_lock(&init_mm.page_table_lock);
@@ -450,7 +452,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			 * attributes.
 			 */
 			if (page_size_mask & (1 << PG_LEVEL_2M)) {
-				pages++;
+				last_map_addr = next;
 				continue;
 			}
 			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
@@ -463,7 +465,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 				pfn_pte(address >> PAGE_SHIFT,
 					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
 			spin_unlock(&init_mm.page_table_lock);
-			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
+			last_map_addr = next;
 			continue;
 		}
 
@@ -483,11 +485,11 @@ static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
			 unsigned long page_size_mask)
 {
-	unsigned long pages = 0;
+	unsigned long pages = 0, next;
 	unsigned long last_map_addr = end;
 	int i = pud_index(addr);
 
-	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
+	for (; i < PTRS_PER_PUD; i++, addr = next) {
 		unsigned long pmd_phys;
 		pud_t *pud = pud_page + pud_index(addr);
 		pmd_t *pmd;
@@ -496,8 +498,9 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		if (addr >= end)
 			break;
 
-		if (!after_bootmem &&
-				!e820_any_mapped(addr, addr+PUD_SIZE, 0)) {
+		next = (addr & PUD_MASK) + PUD_SIZE;
+
+		if (!after_bootmem && !e820_any_mapped(addr, next, 0)) {
 			set_pud(pud, __pud(0));
 			continue;
 		}
@@ -524,7 +527,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			 * attributes.
 			 */
 			if (page_size_mask & (1 << PG_LEVEL_1G)) {
-				pages++;
+				last_map_addr = next;
 				continue;
 			}
 			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
@@ -536,7 +539,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 		set_pte((pte_t *)pud,
 			pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
 		spin_unlock(&init_mm.page_table_lock);
-		last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
+		last_map_addr = next;
 		continue;
 	}
 