author		Yinghai Lu <yinghai@kernel.org>	2013-01-24 15:19:46 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-01-29 18:12:24 -0500
commit		c2bdee594ebcf4a531afe795baf18da509438392
tree		751aa4141c5cdb5a6420a7a7c4ace64f72cd221b /arch/x86
parent		b422a3091748c38b68052e8ba021652590b1f25c
x86, 64bit, mm: Make pgd next calculation consistent with pud/pmd
Calculate 'next' for the pgd level the same way as for pud and pmd: round the address down and add the entry size. Also, do not boundary-check against 'next'; just pass 'end' down to phys_pud_init() instead, because the loop in phys_pud_init() stops at PTRS_PER_PUD and can therefore handle a possibly bigger 'end' properly.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-6-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
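As an aside, here is a minimal self-contained sketch of the "round down and add size" pattern the patch adopts. The constants and the main() harness are illustrative stand-ins (assuming a 64-bit unsigned long), not kernel code; for a power-of-two region size the old and new expressions yield the same boundary, so the change is about matching the pud/pmd style rather than changing the arithmetic.

	#include <stdio.h>

	/* Illustrative constants mirroring a 512 GiB pgd entry span on x86-64
	 * with 4-level paging; defined locally so the example stands alone. */
	#define PGDIR_SHIFT 39UL
	#define PGDIR_SIZE  (1UL << PGDIR_SHIFT)
	#define PGDIR_MASK  (~(PGDIR_SIZE - 1))

	int main(void)
	{
		unsigned long start = (3UL << PGDIR_SHIFT) + 0x1234;	/* unaligned start */

		/* Old style: advance first, then round down. */
		unsigned long next_old = (start + PGDIR_SIZE) & PGDIR_MASK;

		/* New style, matching the pud/pmd code: round down, then add the size.
		 * Both give the same value for a power-of-two PGDIR_SIZE. */
		unsigned long next_new = (start & PGDIR_MASK) + PGDIR_SIZE;

		printf("old: %#lx\nnew: %#lx\n", next_old, next_new);
		return 0;
	}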
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/mm/init_64.c | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 191ab12f5ff3..d7af907c07f4 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -530,9 +530,7 @@ kernel_physical_mapping_init(unsigned long start,
 		pgd_t *pgd = pgd_offset_k(start);
 		pud_t *pud;
 
-		next = (start + PGDIR_SIZE) & PGDIR_MASK;
-		if (next > end)
-			next = end;
+		next = (start & PGDIR_MASK) + PGDIR_SIZE;
 
 		if (pgd_val(*pgd)) {
 			pud = (pud_t *)pgd_page_vaddr(*pgd);
@@ -542,7 +540,7 @@ kernel_physical_mapping_init(unsigned long start,
 		}
 
 		pud = alloc_low_page();
-		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
+		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
 						 page_size_mask);
 
 		spin_lock(&init_mm.page_table_lock);
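For context on why passing a larger 'end' is safe, below is a schematic, self-contained walker shaped the way the commit message describes phys_pud_init(): the loop index is bounded by PTRS_PER_PUD and the walk breaks once the address reaches 'end'. This is an illustrative sketch, not the kernel function; walk_pud_range() and its harness are assumptions made for the example.

	#include <stdio.h>

	#define PUD_SHIFT    30UL
	#define PUD_SIZE     (1UL << PUD_SHIFT)
	#define PUD_MASK     (~(PUD_SIZE - 1))
	#define PTRS_PER_PUD 512

	/* Schematic walker: visit each 1 GiB slot covering [addr, end).
	 * The loop is bounded by PTRS_PER_PUD and breaks when addr reaches
	 * 'end', so an 'end' that lies beyond this pgd entry's range is
	 * handled gracefully without clamping 'next' at the caller. */
	static unsigned long walk_pud_range(unsigned long addr, unsigned long end)
	{
		unsigned long last = addr;
		int i = (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);	/* slot index */

		for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) {
			if (addr >= end)
				break;
			/* ... map this slot here ... */
			last = (addr & PUD_MASK) + PUD_SIZE;
		}
		return last;
	}

	int main(void)
	{
		/* 'end' (1 TiB) exceeds the 512 GiB one pgd entry covers; the
		 * loop still stops after PTRS_PER_PUD iterations. */
		unsigned long last = walk_pud_range(1UL << PUD_SHIFT, 1UL << 40);
		printf("last mapped boundary: %#lx\n", last);
		return 0;
	}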