Diffstat (limited to 'arch/x86/xen/mmu.c')
 arch/x86/xen/mmu.c | 30 +++++++++++++++++++++++++-----
 1 file changed, 25 insertions(+), 5 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index a991b57f91fe..0684f3c74d53 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1275,6 +1275,20 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
 {
 }
 
+static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
+{
+	/* reserve the range used */
+	native_pagetable_reserve(start, end);
+
+	/* set as RW the rest */
+	printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
+			PFN_PHYS(pgt_buf_top));
+	while (end < PFN_PHYS(pgt_buf_top)) {
+		make_lowmem_page_readwrite(__va(end));
+		end += PAGE_SIZE;
+	}
+}
+
 static void xen_post_allocator_init(void);
 
 static __init void xen_pagetable_setup_done(pgd_t *base)
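
[Note, not part of the patch] The new xen_mapping_pagetable_reserve() first lets the native code reserve the pagetable memory that was actually consumed (start to end), then walks the unused tail of the early pagetable buffer, from end up to PFN_PHYS(pgt_buf_top), re-marking each page read-write, since Xen had the whole buffer set read-only. A minimal user-space sketch of the same per-page walk; the pfn values are made up and stand in for pgt_buf_end/pgt_buf_top:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1ULL << PAGE_SHIFT)
#define PFN_PHYS(x) ((uint64_t)(x) << PAGE_SHIFT)

/* stand-in for make_lowmem_page_readwrite(): just report the page */
static void mark_page_rw(uint64_t phys)
{
	printf("making %llx RW\n", (unsigned long long)phys);
}

int main(void)
{
	uint64_t end = PFN_PHYS(0x102);	/* hypothetical pgt_buf_end */
	uint64_t top = PFN_PHYS(0x105);	/* hypothetical pgt_buf_top */

	/* same loop shape as the patch: one page at a time, end -> top */
	while (end < top) {
		mark_page_rw(end);
		end += PAGE_SIZE;
	}
	return 0;
}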
@@ -1473,16 +1487,20 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
+#ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
-	unsigned long pfn = pte_pfn(pte);
-
-#ifdef CONFIG_X86_32
 	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
 	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
 		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
 			       pte_val_ma(pte));
-#endif
+
+	return pte;
+}
+#else /* CONFIG_X86_64 */
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+	unsigned long pfn = pte_pfn(pte);
 
 	/*
 	 * If the new pfn is within the range of the newly allocated
@@ -1491,12 +1509,13 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 	 * it is RO.
 	 */
 	if (((!is_early_ioremap_ptep(ptep) &&
-			pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
+			pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
 	    (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
 		pte = pte_wrprotect(pte);
 
 	return pte;
 }
+#endif /* CONFIG_X86_64 */
 
 /* Init-time set_pte while constructing initial pagetables, which
    doesn't allow RO pagetable pages to be remapped RW */
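
[Note, not part of the patch] These two hunks split mask_rw_pte() into per-arch variants: on 32-bit it only enforces that an already-present pte cannot gain _PAGE_RW, while on 64-bit it write-protects any pte pointing into the early pagetable buffer, with the range check widened from pgt_buf_end to pgt_buf_top so even the not-yet-used part of the buffer stays read-only (that unused tail is returned to RW later by xen_mapping_pagetable_reserve() above). The masking expression is the subtle part: ((old & _PAGE_RW) | ~_PAGE_RW) & new yields new unchanged when the old pte was writable, and new with _PAGE_RW cleared when it was not. A self-contained C check of that identity; the bit values are the standard x86 ones, the helper name is made up:

#include <assert.h>
#include <stdint.h>

#define _PAGE_PRESENT 0x001ULL
#define _PAGE_RW      0x002ULL

/* hypothetical helper wrapping the patch's RW-masking expression */
static uint64_t mask_rw(uint64_t oldpte, uint64_t newpte)
{
	return ((oldpte & _PAGE_RW) | ~_PAGE_RW) & newpte;
}

int main(void)
{
	/* existing entry read-only: _PAGE_RW is stripped from the new value */
	assert(mask_rw(_PAGE_PRESENT, _PAGE_PRESENT | _PAGE_RW)
	       == _PAGE_PRESENT);

	/* existing entry writable: the new value passes through unchanged */
	assert(mask_rw(_PAGE_PRESENT | _PAGE_RW, _PAGE_PRESENT | _PAGE_RW)
	       == (_PAGE_PRESENT | _PAGE_RW));
	return 0;
}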
@@ -2100,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 
 void __init xen_init_mmu_ops(void)
 {
+	x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
 	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
 	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
 	pv_mmu_ops = xen_mmu_ops;
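
[Note, not part of the patch] The last hunk plugs the new callback into x86_init.mapping.pagetable_reserve, the x86 init ops table whose entries default to the native implementations and get overridden by paravirt guests at boot. A toy illustration of that override pattern; all names here are invented for the example and only the shape mirrors the kernel's x86_init table:

#include <stdio.h>

struct mapping_ops {
	void (*pagetable_reserve)(unsigned long long start,
				  unsigned long long end);
};

static void native_reserve(unsigned long long start, unsigned long long end)
{
	printf("reserve %llx - %llx\n", start, end);
}

static void xen_reserve(unsigned long long start, unsigned long long end)
{
	native_reserve(start, end);		/* reserve what was used */
	printf("xen: RW tail above %llx\n", end);	/* then release the rest */
}

/* defaults to the native hook, like the real ops table */
static struct mapping_ops mapping = { .pagetable_reserve = native_reserve };

int main(void)
{
	/* the override, analogous to what xen_init_mmu_ops() does */
	mapping.pagetable_reserve = xen_reserve;
	mapping.pagetable_reserve(0x100000ULL, 0x102000ULL);
	return 0;
}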