aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorStefano Stabellini <stefano.stabellini@eu.citrix.com>2011-06-03 05:51:34 -0400
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2011-06-09 09:08:53 -0400
commita91d92875ee94e4703fd017ccaadb48cfb344994 (patch)
tree60028033704c83b1ceef8b2fb872d2c94a26ed0d /arch/x86
parentf124c6ae59e193705c9ddac57684d50006d710e6 (diff)
xen: partially revert "xen: set max_pfn_mapped to the last pfn mapped"
We only need to set max_pfn_mapped to the last pfn mapped on x86_64 to make sure that cleanup_highmap doesn't remove important mappings at _end. We don't need to do this on x86_32 because cleanup_highmap is not called on x86_32. Besides lowering max_pfn_mapped on x86_32 has the unwanted side effect of limiting the amount of memory available for the 1:1 kernel pagetable allocation. This patch reverts the x86_32 part of the original patch. CC: stable@kernel.org Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/xen/mmu.c9
1 file changed, 8 insertions, 1 deletion
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dc708dcc62f1..afe1d54f980c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1599,6 +1599,11 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1599 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { 1599 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1600 pte_t pte; 1600 pte_t pte;
1601 1601
1602#ifdef CONFIG_X86_32
1603 if (pfn > max_pfn_mapped)
1604 max_pfn_mapped = pfn;
1605#endif
1606
1602 if (!pte_none(pte_page[pteidx])) 1607 if (!pte_none(pte_page[pteidx]))
1603 continue; 1608 continue;
1604 1609
@@ -1766,7 +1771,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
1766 initial_kernel_pmd = 1771 initial_kernel_pmd =
1767 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); 1772 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1768 1773
1769 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); 1774 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1775 xen_start_info->nr_pt_frames * PAGE_SIZE +
1776 512*1024);
1770 1777
1771 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); 1778 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
1772 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); 1779 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);