author	Yinghai Lu <yinghai@kernel.org>	2012-11-16 22:39:03 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2012-11-17 14:59:26 -0500
commit	6f80b68e9e515547edbacb0c37491730bf766db5 (patch)
tree	40d5f129bff2042d53354c0aa5b7b748265a3d13 /arch/x86/xen
parent	9985b4c6fa7d660f685918a58282275e9e35d8e0 (diff)
x86, mm, Xen: Remove mapping_pagetable_reserve()
The page table area is pre-mapped now after:

    x86, mm: setup page table in top-down
    x86, mm: Remove early_memremap workaround for page table accessing on 64bit

mapping_pagetable_reserve() is not used anymore, so remove it.

Also remove the operation in mask_rw_pte(): the modified alloc_low_page() always returns pages that are already mapped, and xen_alloc_pte_init(), xen_alloc_pmd_init(), etc. mark the page RO before hooking it into the pagetable automatically.

-v2: add changelog about mask_rw_pte() from Stefano.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-27-git-send-email-yinghai@kernel.org
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
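The ordering that makes the mask_rw_pte() fixup unnecessary is: a freshly allocated page-table page is filled while still writable, made read-only, and only then hooked into the hierarchy. The standalone userspace sketch below is hypothetical and only models that ordering; it stands in for make_lowmem_page_readonly() plus the pin hypercall with an mprotect() call, and its names (alloc_table_page, fake_pmd) are illustrative, not the kernel's.

/* Hypothetical userspace model of the "mark RO before hooking" ordering
 * that xen_alloc_pte_init()/xen_alloc_pmd_init() rely on.
 * Build with: cc -o ro-before-hook ro-before-hook.c
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static uint64_t *fake_pmd[512];		/* stand-in for a higher-level table */

/* Models alloc_low_page(): returns memory that is already mapped R/W. */
static uint64_t *alloc_table_page(size_t page_size)
{
	void *p = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

int main(void)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	uint64_t *pte_page = alloc_table_page(page_size);

	if (!pte_page)
		return 1;

	/* Fill the new page-table page while it is still writable. */
	memset(pte_page, 0, page_size);

	/* Step 1: make the page read-only *before* it becomes part of the
	 * hierarchy (kernel: make_lowmem_page_readonly() + pin hypercall). */
	if (mprotect(pte_page, page_size, PROT_READ) != 0)
		return 1;

	/* Step 2: only now hook it into the parent table. */
	fake_pmd[0] = pte_page;

	printf("page %p hooked read-only\n", (void *)fake_pmd[0]);
	return 0;
}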
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/mmu.c | 28 ----------------------------
1 file changed, 0 insertions(+), 28 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dcf5f2dd91ec..bbb883f58bc4 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1178,20 +1178,6 @@ static void xen_exit_mmap(struct mm_struct *mm)
 
 static void xen_post_allocator_init(void);
 
-static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
-{
-	/* reserve the range used */
-	native_pagetable_reserve(start, end);
-
-	/* set as RW the rest */
-	printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
-			PFN_PHYS(pgt_buf_top));
-	while (end < PFN_PHYS(pgt_buf_top)) {
-		make_lowmem_page_readwrite(__va(end));
-		end += PAGE_SIZE;
-	}
-}
-
 #ifdef CONFIG_X86_64
 static void __init xen_cleanhighmap(unsigned long vaddr,
 				    unsigned long vaddr_end)
@@ -1503,19 +1489,6 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 #else /* CONFIG_X86_64 */
 static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 {
-	unsigned long pfn = pte_pfn(pte);
-
-	/*
-	 * If the new pfn is within the range of the newly allocated
-	 * kernel pagetable, and it isn't being mapped into an
-	 * early_ioremap fixmap slot as a freshly allocated page, make sure
-	 * it is RO.
-	 */
-	if (((!is_early_ioremap_ptep(ptep) &&
-	      pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
-	    (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
-		pte = pte_wrprotect(pte);
-
 	return pte;
 }
 #endif /* CONFIG_X86_64 */
@@ -2197,7 +2170,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 
 void __init xen_init_mmu_ops(void)
 {
-	x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
 	x86_init.paging.pagetable_init = xen_pagetable_init;
 	pv_mmu_ops = xen_mmu_ops;
 
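For reference, the hook assignment dropped in the hunk above follows the usual x86_init pattern: a struct of function pointers pre-filled with native defaults that a guest may override at init time; after this patch the native pagetable_reserve default is left in place. A minimal standalone sketch of that pattern, with illustrative names only (not the kernel's struct layout):

/* Hypothetical model of an x86_init-style hook table: native defaults,
 * optionally overridden by guest-specific init code. */
#include <stdio.h>
#include <stdint.h>

struct mapping_ops {
	void (*pagetable_reserve)(uint64_t start, uint64_t end);
};

static void native_reserve(uint64_t start, uint64_t end)
{
	printf("native: reserve %#llx-%#llx\n",
	       (unsigned long long)start, (unsigned long long)end);
}

/* Default table; stays native unless an init function overrides it. */
static struct mapping_ops mapping = { .pagetable_reserve = native_reserve };

int main(void)
{
	/* With the override removed, callers always reach the native default. */
	mapping.pagetable_reserve(0x100000, 0x200000);
	return 0;
}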