aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm
diff options
context:
space:
mode:
authorYinghai Lu <yinghai@kernel.org>2012-11-16 22:38:54 -0500
committerH. Peter Anvin <hpa@linux.intel.com>2012-11-17 14:59:15 -0500
commit960ddb4fe7832b559897e8b26ec805839b706905 (patch)
tree92e7cf34c23f263ff50ddd7ca3bfc14f42d30312 /arch/x86/mm
parent74f27655dda84604d8bab47872020dcce5c88731 (diff)
x86, mm: Align start address to correct big page size
We are going to use a buffer in BRK to map a small range just under the memory top, and use that newly mapped ram to map the ram range under it. The ram range that will be mapped at first could be only page aligned, but the ranges around it are ram too, so we could use a bigger page size to map it and avoid small page sizes. We will adjust page_size_mask in a following patch: x86, mm: Use big page size for small memory range to use big page size for small ram range. Before that patch, this patch will make sure the start address is aligned down according to the bigger page size, otherwise the entry in the page table will not have the correct value. Signed-off-by: Yinghai Lu <yinghai@kernel.org> Link: http://lkml.kernel.org/r/1353123563-3103-18-git-send-email-yinghai@kernel.org Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--arch/x86/mm/init_32.c1
-rw-r--r--arch/x86/mm/init_64.c5
2 files changed, 4 insertions, 2 deletions
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 11a58001b4ce..27f7fc69cf8a 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -310,6 +310,7 @@ repeat:
310 __pgprot(PTE_IDENT_ATTR | 310 __pgprot(PTE_IDENT_ATTR |
311 _PAGE_PSE); 311 _PAGE_PSE);
312 312
313 pfn &= PMD_MASK >> PAGE_SHIFT;
313 addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + 314 addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
314 PAGE_OFFSET + PAGE_SIZE-1; 315 PAGE_OFFSET + PAGE_SIZE-1;
315 316
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 32c7e3847cf6..869372a5d3cf 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -464,7 +464,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
464 pages++; 464 pages++;
465 spin_lock(&init_mm.page_table_lock); 465 spin_lock(&init_mm.page_table_lock);
466 set_pte((pte_t *)pmd, 466 set_pte((pte_t *)pmd,
467 pfn_pte(address >> PAGE_SHIFT, 467 pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
468 __pgprot(pgprot_val(prot) | _PAGE_PSE))); 468 __pgprot(pgprot_val(prot) | _PAGE_PSE)));
469 spin_unlock(&init_mm.page_table_lock); 469 spin_unlock(&init_mm.page_table_lock);
470 last_map_addr = next; 470 last_map_addr = next;
@@ -541,7 +541,8 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
541 pages++; 541 pages++;
542 spin_lock(&init_mm.page_table_lock); 542 spin_lock(&init_mm.page_table_lock);
543 set_pte((pte_t *)pud, 543 set_pte((pte_t *)pud,
544 pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); 544 pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
545 PAGE_KERNEL_LARGE));
545 spin_unlock(&init_mm.page_table_lock); 546 spin_unlock(&init_mm.page_table_lock);
546 last_map_addr = next; 547 last_map_addr = next;
547 continue; 548 continue;